[lc-checkins] CVS: linux/include/linux comp_cache.h,1.40,1.41
From: Rodrigo S. de C. <rc...@us...> - 2002-01-14 12:05:11
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv13325/include/linux

Modified Files:
	comp_cache.h
Log Message:
This batch of changes still includes lots of cleanups and code
rewrites to make the code simpler. A performance increase has been
noticed too.

- number_of_pages removed from comp_cache_t. Whether a compressed
  page has any fragments can be checked through its fragments list.

- vswap: no semaphore is needed. I have no idea why the
  {lock,unlock}_vswap() functions were once added; I can't see why
  they would be needed, so they were removed. The same goes for the
  real_entry field in struct vswap_address.

- vswap: a new function has been added, add_fragment_vswap(),
  analogous to remove_fragment_vswap(). It is called from
  get_comp_cache_page() and is a great help in keeping things
  modular.

- vm_enough_memory(): we now take compressed cache space into
  account when deciding whether an application may allocate memory.
  This is done by calling comp_cache_free_space(), which returns,
  based upon estimated_free_space, the number of pages that can
  still be compressed (see the first sketch after this list).

- move_and_fix_fragments() deleted. comp_cache_free() follows a new
  policy that avoids moving data back and forth all the time, as
  before: we free the fragment but leave it in place, waiting to be
  merged with the surrounding free space. It's pretty simple, check
  the code. The new code has two new functions,
  merge_right_neighbour() and merge_left_neighbour() (see the
  merging sketch after this list).

- the fragments list is kept sorted by the offset field, so when
  freeing we no longer have to search for the next and previous
  fragments every time. Since most of the time it is just a plain
  list_add_tail() in get_comp_cache_page(), this makes the code
  simpler and nicer.

- lookup_comp_cache() was partially rewritten, mainly because we no
  longer sleep to get a lock on the comp_page.

- find_and_lock_comp_page() removed and find_nolock_comp_page()
  renamed to find_comp_page(). All functions that previously called
  find_and_lock_comp_page() now call find_comp_page() and lock the
  comp_page at once with TryLockPage() (see the locking sketch after
  this list).

- oom_kill() was fixed and now takes the free space in the
  compressed cache into account by calling
  comp_cache_available_space(). That avoids killing an application
  while there is still space left in the compressed cache.
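To picture the vm_enough_memory()/oom_kill() accounting, here is a
minimal userspace sketch. Everything in it (the names with a _sketch
suffix, the byte-based counter, the 4K page size) is an assumption for
illustration; it is not the patched kernel code, it only shows the
idea of folding the compressed cache's estimated free space into
memory-availability decisions.

/* Sketch only: fold the compressed cache's estimated free space into
 * an "enough memory?" decision, as vm_enough_memory() and oom_kill()
 * now do via comp_cache_free_space()/comp_cache_available_space(). */
#include <stddef.h>

static size_t estimated_free_space;	/* bytes the cache can still absorb (assumed) */

/* Number of pages that can still be compressed (PAGE_SIZE assumed 4K). */
static unsigned long comp_cache_free_space_sketch(void)
{
	return estimated_free_space / 4096;
}

/* Grant an allocation of 'pages' if plain free memory plus the pages
 * the compressed cache can still take covers the request. */
static int enough_memory_sketch(unsigned long pages, unsigned long free_pages)
{
	return pages <= free_pages + comp_cache_free_space_sketch();
}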
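The merging sketch below models the new free policy in userspace. The
struct layout, the 'free' flag and the helper names are stand-ins, not
the real comp_cache code; the point is only that a freed fragment is
left in place and coalesced with its neighbours, which the
offset-sorted list makes a prev/next lookup instead of a search.

/* Illustrative sketch: fragments of a compressed page kept in a list
 * sorted by offset, freed in place and merged with free neighbours
 * (the idea behind merge_right_neighbour()/merge_left_neighbour()). */
#include <stdlib.h>

struct fragment {
	unsigned short offset;		/* start of the data in the page */
	unsigned short size;		/* compressed size of the data   */
	int free;			/* hypothetical "free space" flag */
	struct fragment *prev, *next;	/* neighbours, sorted by offset  */
};

/* Grow a freed fragment into a free right neighbour. */
static void merge_right(struct fragment *f)
{
	struct fragment *r = f->next;

	if (!r || !r->free)
		return;
	f->size += r->size;
	f->next = r->next;
	if (r->next)
		r->next->prev = f;
	free(r);
}

/* Let a free left neighbour absorb the freed fragment. */
static void merge_left(struct fragment *f)
{
	struct fragment *l = f->prev;

	if (!l || !l->free)
		return;
	l->size += f->size;
	l->next = f->next;
	if (f->next)
		f->next->prev = l;
	free(f);
}

/* Free policy: no data is copied -- the fragment is only marked free
 * and merged with whatever free space surrounds it. */
static void fragment_free(struct fragment *f)
{
	f->free = 1;
	merge_right(f);		/* f survives a right merge...        */
	merge_left(f);		/* ...but may be absorbed by the left */
}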
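The locking sketch below shows the lookup pattern the callers switch
to: look the comp_page up without locking, then try to take the lock
non-blockingly. The types and helpers are stand-ins (the real
TryLockPage() is an atomic test-and-set on a page flag bit), and the
back-off behaviour shown is an assumption for illustration.

/* Sketch of the find_comp_page() + TryLockPage() pattern. */
#include <stdbool.h>
#include <stddef.h>

struct comp_page {
	int locked;	/* stand-in for the real page lock bit */
};

/* Non-blocking lock attempt, like TryLockPage(): true if we got it. */
static bool trylock_comp_page(struct comp_page *p)
{
	if (p->locked)
		return false;
	p->locked = 1;
	return true;
}

/* Callers now lock the looked-up page at once instead of sleeping. */
static struct comp_page *lock_found_page(struct comp_page *p)
{
	if (!p)
		return NULL;	/* not in the compressed cache */
	if (!trylock_comp_page(p))
		return NULL;	/* locked by someone else: back off, don't sleep */
	return p;
}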
Index: comp_cache.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v
retrieving revision 1.40
retrieving revision 1.41
diff -C2 -r1.40 -r1.41
*** comp_cache.h	2002/01/10 12:39:30	1.40
--- comp_cache.h	2002/01/14 12:05:08	1.41
***************
*** 2,6 ****
   * linux/mm/comp_cache.h
   *
!  * Time-stamp: <2002-01-08 16:09:01 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
--- 2,6 ----
   * linux/mm/comp_cache.h
   *
!  * Time-stamp: <2002-01-14 08:49:45 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
***************
*** 24,28 ****
  #include <asm/bitops.h>
  #include <asm/page.h>
- #include <asm/semaphore.h>
  
  #include <linux/shmem_fs.h>
--- 24,27 ----
***************
*** 64,70 ****
  
  	/* fields for compression structure */
- 	unsigned short number_of_pages;
  	unsigned short free_offset;
- 	short free_space;
  
  
--- 63,67 ----
***************
*** 118,121 ****
--- 115,119 ----
  #define CompFragmentSetIO(fragment) set_bit(CF_IO, &(fragment)->flags)
  #define CompFragmentTestandSetIO(fragment) test_and_set_bit(CF_IO, &(fragment)->flags)
+ #define CompFragmentTestandClearIO(fragment) test_and_clear_bit(CF_IO, &(fragment)->flags)
  #define CompFragmentClearIO(fragment) clear_bit(CF_IO, &(fragment)->flags)
  
***************
*** 134,138 ****
  
  #define INF 0xffffffff
! #define NUM_SWP_BUFFERS (pager_daemon.swap_cluster * (1 << page_cluster))
  
  /* do not change the fields order */
--- 132,136 ----
  
  #define INF 0xffffffff
! #define NUM_SWP_BUFFERS 128
  
  /* do not change the fields order */
***************
*** 351,356 ****
  
  	struct list_head list;
- 	swp_entry_t real_entry;
- 
  	unsigned short count;
  	unsigned short offset;
--- 349,352 ----
***************
*** 361,365 ****
  
  	struct pte_list * pte_list;
- 	struct semaphore sem;
  };
  
--- 357,360 ----
***************
*** 375,380 ****
  #define vswap_info_struct(p) (p == &swap_info[COMP_CACHE_SWP_TYPE])
  #define vswap_address(entry) (SWP_TYPE(entry) == COMP_CACHE_SWP_TYPE)
- #define real_swap_address(offset) (vswap_address[offset]->real_entry.val)
- #define vswap_locked(offset) (down_trylock(&vswap_address[offset]->sem))
  #define reserved(offset) (vswap_address[offset]->fragment == VSWAP_RESERVED)
  
--- 370,373 ----
***************
*** 394,400 ****
  inline void del_swap_cache_page_vswap(struct page *);
  
- inline void lock_vswap(swp_entry_t);
- inline void unlock_vswap(swp_entry_t);
- 
  
  #else
--- 387,390 ----
***************
*** 428,434 ****
  static inline void del_swap_cache_page_vswap(struct page * page) {};
  
- static inline void lock_vswap(swp_entry_t entry) {};
- static inline void unlock_vswap(swp_entry_t entry) {};
- 
  
  #endif
--- 418,421 ----
***************
*** 444,448 ****
  /* aux.c */
  unsigned long long big_division(unsigned long long, unsigned long long);
! inline comp_cache_t * find_nolock_comp_page(swp_entry_t, comp_cache_fragment_t **);
  comp_cache_t * find_and_lock_comp_page(swp_entry_t, comp_cache_fragment_t **);
  
--- 431,435 ----
  /* aux.c */
  unsigned long long big_division(unsigned long long, unsigned long long);
! inline comp_cache_t * find_comp_page(swp_entry_t, comp_cache_fragment_t **);
  comp_cache_t * find_and_lock_comp_page(swp_entry_t, comp_cache_fragment_t **);
  
***************
*** 476,479 ****
--- 463,479 ----
  inline void add_fragment_to_lru_queue(comp_cache_fragment_t *);
  inline void remove_fragment_from_lru_queue(comp_cache_fragment_t *);
+ 
+ /* enough memory functions */
+ #ifdef CONFIG_COMP_CACHE
+ inline int comp_cache_free_space(void);
+ #else
+ 
+ static inline int comp_cache_free_space(void)
+ {
+ 	return 0;
+ }
+ 
+ #endif
+ 
  
  /* proc.c */