[lc-checkins] CVS: linux/include/linux comp_cache.h,1.93,1.94
From: Rodrigo S. de C. <rc...@us...> - 2002-07-17 20:44:39
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv16293/include/linux

Modified Files:
	comp_cache.h
Log Message:
Features

o First implementation of support for SMP systems. Only two spinlocks
  are used for this; the goal at the moment is stability, not
  performance. In our first tests it runs without corruption on a
  system with the preempt patch, but only with swap cache support (and
  without resizing the compressed cache). Let the first races show up
  :-) Once the whole code is working reasonably well, these global
  locks will be split into finer-grained ones to improve concurrency
  (a rough sketch of the locking pattern follows the diff below).

Bug fixes

o Fixed a compilation error when the compressed cache is disabled.

Cleanups

o Removed virtual_swap_count(), since it wasn't used (swap_count()
  isn't used either).

Index: comp_cache.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v
retrieving revision 1.93
retrieving revision 1.94
diff -C2 -r1.93 -r1.94
*** comp_cache.h	17 Jul 2002 13:00:57 -0000	1.93
--- comp_cache.h	17 Jul 2002 20:44:36 -0000	1.94
***************
*** 2,6 ****
   * linux/mm/comp_cache.h
   *
!  * Time-stamp: <2002-07-17 08:48:48 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
--- 2,6 ----
   * linux/mm/comp_cache.h
   *
!  * Time-stamp: <2002-07-17 16:23:16 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
***************
*** 352,355 ****
--- 352,357 ----
  #define COMP_PAGE_SIZE ((comp_page_order + 1) * PAGE_SIZE)
  
+ #define comp_cache_used_space ((num_comp_pages * PAGE_SIZE) - comp_cache_free_space)
+ 
  #define page_to_comp_page(n) ((n) >> comp_page_order)
  #define comp_page_to_page(n) ((n) << comp_page_order)
***************
*** 357,361 ****
  extern int comp_page_order;
  extern unsigned long comp_cache_free_space;
! #define comp_cache_used_space ((num_comp_pages * PAGE_SIZE) - comp_cache_free_space)
  #else
  static inline void comp_cache_init(void) {};
--- 359,363 ----
  extern int comp_page_order;
  extern unsigned long comp_cache_free_space;
! extern spinlock_t comp_cache_lock;
  #else
  static inline void comp_cache_init(void) {};
***************
*** 411,421 ****
  int virtual_swap_duplicate(swp_entry_t);
  int virtual_swap_free(unsigned long);
- int virtual_swap_count(swp_entry_t);
  swp_entry_t get_virtual_swap_page(void);
  
! inline int comp_cache_available_space(void);
! 
! inline void set_vswap_allocating(swp_entry_t entry);
! inline void clear_vswap_allocating(swp_entry_t entry);
  
  extern void FASTCALL(add_pte_vswap(pte_t *, swp_entry_t));
--- 413,419 ----
  int virtual_swap_duplicate(swp_entry_t);
  int virtual_swap_free(unsigned long);
  swp_entry_t get_virtual_swap_page(void);
  
! int comp_cache_available_space(void);
  
  extern void FASTCALL(add_pte_vswap(pte_t *, swp_entry_t));
***************
*** 438,441 ****
--- 436,441 ----
  	vswap_address[SWP_OFFSET(entry)]->fault_count--;
  }
+ 
+ extern spinlock_t virtual_swap_list;
  
  #else
***************
*** 446,457 ****
  static inline int virtual_swap_duplicate(swp_entry_t entry) { return 0; };
  static inline int virtual_swap_free(unsigned long offset) { return 0; }
- static inline int virtual_swap_count(swp_entry_t entry) { return 0; }
  
  static inline swp_entry_t get_virtual_swap_page(void) { return (swp_entry_t) { 0 }; }
  static inline int comp_cache_available_space(void) { return 0; }
  
- static inline void set_vswap_allocating(swp_entry_t entry) { };
- static inline void clear_vswap_allocating(swp_entry_t entry) { };
- 
  static inline void add_pte_vswap(pte_t * ptep, swp_entry_t entry) {};
  static inline void remove_pte_vswap(pte_t * ptep) {};
--- 446,453 ----
***************
*** 459,462 ****
--- 455,460 ----
  static inline void del_swap_cache_page_vswap(struct page * page) {};
  static inline int free_pte_list(struct pte_list * pte_list, unsigned long offset) { return 0; }
+ 
+ static inline void get_vswap(swp_entry_t entry) {};
+ static inline void put_vswap(swp_entry_t entry) {};
  
  #endif
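
As context for the coarse-grained SMP locking described in the log
message, here is a minimal sketch of what serializing everything
behind the two global spinlocks declared in this diff (comp_cache_lock
and virtual_swap_list) could look like. It is not taken from the
comp_cache code: the helper names comp_cache_reserve_space() and
vswap_ref() and their bodies are hypothetical illustrations; only the
2.4-era spinlock primitives (spin_lock()/spin_unlock(),
SPIN_LOCK_UNLOCKED) and the extern variables from comp_cache.h are
real.

/*
 * Illustrative sketch only -- not the actual comp_cache implementation.
 * One global lock serializes all compressed-cache accounting, a second
 * one serializes the virtual swap data; finer-grained locks can replace
 * them later without changing these entry points.
 */
#include <linux/spinlock.h>

spinlock_t comp_cache_lock = SPIN_LOCK_UNLOCKED;    /* extern in comp_cache.h */
spinlock_t virtual_swap_list = SPIN_LOCK_UNLOCKED;  /* extern in comp_cache.h */

extern unsigned long comp_cache_free_space;         /* from comp_cache.h */

/* Hypothetical helper: reserve 'size' bytes of compressed-cache space
 * while holding the single global cache lock. */
static int comp_cache_reserve_space(unsigned long size)
{
	int ret = 0;

	spin_lock(&comp_cache_lock);
	if (comp_cache_free_space >= size) {
		comp_cache_free_space -= size;
		ret = 1;
	}
	spin_unlock(&comp_cache_lock);
	return ret;
}

/* Hypothetical helper: any walk or update of the virtual swap entries
 * goes through the second global lock. */
static void vswap_ref(unsigned long offset)
{
	spin_lock(&virtual_swap_list);
	/* ... bump the reference count of the vswap entry at 'offset' ... */
	spin_unlock(&virtual_swap_list);
}

Because every caller funnels through the same two locks, correctness
does not yet depend on any ordering between per-structure locks, which
is why the log message puts stability ahead of performance for now.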