Thread: [lc-checkins] CVS: linux/mm/comp_cache adaptivity.c,1.28,1.29 free.c,1.32,1.33 main.c,1.47,1.48 swap
From: Rodrigo S. de C. <rc...@us...> - 2002-06-25 14:34:11
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv13268/mm/comp_cache Modified Files: adaptivity.c free.c main.c swapin.c swapout.c vswap.c Log Message: Feature o Implemented support for resizing the compressed cache on demand. The user defines the maximum compressed cache size and compressed cache will grow up to this size if necessary. Only then it will start swapping out fragments. And when the compressed cache entries start to get empty, their pages will be released to the system, decreasing compressed cache size. Still have to solve some issues about resizing vswap. o Changed most of the calls from comp_cache_free_locked() to comp_cache_free(), in order to release the page if necessary. Only calls from writeout functions were not changed since we don't want to use those pages to shrink the compressed cache. Bug fixes o Fixed potential oops in comp_cache_use_address(). If the ptes cannot be set to the new address, we would access a null variable (fragment). o Fixed bug in swap in process for virtual swap addresses. While allocating a new page, that virtual swap address might get unused (it gained a real address or vswap table got shrunk), what could lead to a BUG() in comp_cache_swp_duplicate(). Other o Some comments added to functions in adaptivity.c o Updated Configure.help for CONFIG_COMP_CACHE Index: adaptivity.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v retrieving revision 1.28 retrieving revision 1.29 diff -C2 -r1.28 -r1.29 *** adaptivity.c 20 Jun 2002 14:28:49 -0000 1.28 --- adaptivity.c 25 Jun 2002 14:34:07 -0000 1.29 *************** *** 2,6 **** * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-06-20 10:59:52 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-06-25 10:32:29 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 18,21 **** --- 18,25 ---- static int fragment_failed_alloc = 0, vswap_failed_alloc = 0; + /* semaphore used to avoid two concurrent instances of + * {grow,shrink}_vswap() functions to run together */ + static struct semaphore vswap_resize_semaphore; + extern void comp_cache_fix_watermarks(int); *************** *** 25,29 **** unsigned long new_fragment_hash_size; unsigned int i, new_fragment_hash_bits, new_fragment_hash_order, hash_index; ! new_fragment_hash_size = 3 * num_comp_pages * sizeof(struct comp_cache_fragment *); new_fragment_hash = create_fragment_hash(&new_fragment_hash_size, &new_fragment_hash_bits, &new_fragment_hash_order); --- 29,33 ---- unsigned long new_fragment_hash_size; unsigned int i, new_fragment_hash_bits, new_fragment_hash_order, hash_index; ! new_fragment_hash_size = 3 * num_comp_pages * sizeof(struct comp_cache_fragment *); new_fragment_hash = create_fragment_hash(&new_fragment_hash_size, &new_fragment_hash_bits, &new_fragment_hash_order); *************** *** 77,86 **** extern kmem_cache_t * vswap_cachep; static int wait_scan = 0; /*** ! * shrink_vswap(unsigned long) - shrinks vswap adressing table from ! * its current size (vswap_current_num_entries) to NUM_VSWAP_ENTRIES, ! * its new size in function of num_comp_pages. * * we try to shrink the vswap at once, but that will depend on getting --- 81,91 ---- extern kmem_cache_t * vswap_cachep; + extern unsigned long nr_free_vswap; static int wait_scan = 0; /*** ! * shrink_vswap(void) - shrinks vswap adressing table from its current ! 
* size (vswap_current_num_entries) to NUM_VSWAP_ENTRIES, its new size ! * in function of num_comp_pages. * * we try to shrink the vswap at once, but that will depend on getting *************** *** 108,119 **** */ void ! shrink_vswap(unsigned long vswap_new_num_entries) { struct page * swap_cache_page; struct comp_cache_fragment * fragment; struct vswap_address ** new_vswap_address; ! unsigned int total_scan = 0, failed_scan = 0, failed_alloc = 0; ! unsigned long index, new_index; swp_entry_t old_entry, entry; if (vswap_current_num_entries <= vswap_new_num_entries) --- 113,140 ---- */ void ! shrink_vswap(void) { struct page * swap_cache_page; struct comp_cache_fragment * fragment; struct vswap_address ** new_vswap_address; ! unsigned int failed_alloc = 0; ! unsigned long index, new_index, vswap_new_num_entries = NUM_VSWAP_ENTRIES; swp_entry_t old_entry, entry; + + if (!vswap_address) + return; + + if (vswap_current_num_entries <= 1.10 * NUM_VSWAP_ENTRIES) + return; + /* more used entries than the new size? can't shrink */ + if (vswap_num_used_entries >= NUM_VSWAP_ENTRIES) + return; + + if (down_trylock(&vswap_resize_semaphore)) + return; + + #if 0 + printk("shrinking\n"); + #endif if (vswap_current_num_entries <= vswap_new_num_entries) *************** *** 133,145 **** for (index = vswap_last_used; index >= vswap_new_num_entries; index--) { ! /* we have already freed this entry for shrink */ if (!vswap_address[index]) continue; /* unused entry? let's only free it */ if (!vswap_address[index]->count) { list_del(&(vswap_address[index]->list)); kmem_cache_free(vswap_cachep, (vswap_address[index])); vswap_address[index] = NULL; continue; } --- 154,185 ---- for (index = vswap_last_used; index >= vswap_new_num_entries; index--) { ! /* either this entry has already been freed or hasn't ! * been sucessfully allocated */ if (!vswap_address[index]) continue; + /* we are shrinking this vswap table from a function + * which is freeing a vswap entry, so forget this + * entry. The same for the case this entry is in the + * middle of a swapin process (allocating a new + * page) */ + if (freeing(index) || allocating(index)) + continue; + /* unused entry? let's only free it */ if (!vswap_address[index]->count) { list_del(&(vswap_address[index]->list)); + nr_free_vswap--; kmem_cache_free(vswap_cachep, (vswap_address[index])); vswap_address[index] = NULL; + + /* time to fix the last_vswap_allocated (we + * may not reach the point where it will be + * updated) */ + if (index <= last_vswap_allocated) + last_vswap_allocated = index - 1; + #if 0 + printk("null %d\n", index); + #endif continue; } *************** *** 164,179 **** * boundary to link this used entry we are moving * down */ ! for (; new_index > 0 && vswap_address[new_index]->count; new_index--); ! /* we must have a new index, otherwise ! * vswap_needs_to_shrink() is broken */ if (!new_index) ! BUG(); old_entry = SWP_ENTRY(COMP_CACHE_SWP_TYPE, index); entry = SWP_ENTRY(COMP_CACHE_SWP_TYPE, new_index); - total_scan++; - /* let's fix the ptes */ if (!set_pte_list_to_entry(vswap_address[index]->pte_list, old_entry, entry)) --- 204,227 ---- * boundary to link this used entry we are moving * down */ ! while (new_index > 0) { ! if (!vswap_address[new_index]) ! break; ! ! if (freeing(new_index)) ! goto next; ! ! if (!vswap_address[new_index]->count) ! break; ! next: ! new_index--; ! } ! if (!new_index) ! 
goto backout; old_entry = SWP_ENTRY(COMP_CACHE_SWP_TYPE, index); entry = SWP_ENTRY(COMP_CACHE_SWP_TYPE, new_index); /* let's fix the ptes */ if (!set_pte_list_to_entry(vswap_address[index]->pte_list, old_entry, entry)) *************** *** 195,205 **** } ! list_del(&(vswap_address[new_index]->list)); ! kmem_cache_free(vswap_cachep, (vswap_address[new_index])); ! vswap_address[new_index] = NULL; ! ! if (vswap_address[new_index]) ! BUG(); vswap_address[new_index] = vswap_address[index]; vswap_address[new_index]->offset = new_index; --- 243,257 ---- } ! if (vswap_address[new_index]) { ! list_del(&(vswap_address[new_index]->list)); ! nr_free_vswap--; ! kmem_cache_free(vswap_cachep, (vswap_address[new_index])); ! vswap_address[new_index] = NULL; ! } + #if 0 + printk("vswap %lu -> %lu\n", index, new_index); + #endif + vswap_address[new_index] = vswap_address[index]; vswap_address[new_index]->offset = new_index; *************** *** 210,218 **** backout: - failed_scan++; if (swap_cache_page) UnlockPage(swap_cache_page); if (fragment && !reserved(index)) UnlockPage(fragment->comp_page->page); } --- 262,270 ---- backout: if (swap_cache_page) UnlockPage(swap_cache_page); if (fragment && !reserved(index)) UnlockPage(fragment->comp_page->page); + break; } *************** *** 222,226 **** continue; ! if (!vswap_address[vswap_last_used]->count && vswap_last_used >= vswap_new_num_entries) BUG(); --- 274,280 ---- continue; ! if (!vswap_address[vswap_last_used]->count ! && vswap_last_used >= vswap_new_num_entries ! && !freeing(vswap_last_used)) BUG(); *************** *** 228,239 **** } ! if (vswap_last_used >= vswap_new_num_entries) { ! /* if we failed all tries to find the vmas, it's ! * better wait for a while before trying again, since ! * the call might be coming from mmput() */ ! if (total_scan > 0 && total_scan == failed_scan) ! wait_scan = total_scan * 2; ! return; ! } allocate_new_vswap: --- 282,287 ---- } ! if (vswap_last_used >= vswap_new_num_entries) ! goto out; allocate_new_vswap: *************** *** 242,246 **** if (!new_vswap_address) { vswap_failed_alloc = 1; ! return; } --- 290,294 ---- if (!new_vswap_address) { vswap_failed_alloc = 1; ! goto out; } *************** *** 290,296 **** vswap_last_used = vswap_new_num_entries - 1; vswap_failed_alloc = 0; } ! /* grow_vswap(void) - grows vswap adressing table from its current * size (vswap_current_num_entries) to NUM_VSWAP_ENTRIES, its new size * in function of num_comp_pages. --- 338,347 ---- vswap_last_used = vswap_new_num_entries - 1; vswap_failed_alloc = 0; + out: + up(&vswap_resize_semaphore); } ! /*** ! * grow_vswap(void) - grows vswap adressing table from its current * size (vswap_current_num_entries) to NUM_VSWAP_ENTRIES, its new size * in function of num_comp_pages. *************** *** 300,308 **** * new ones), updating some control variables to conclude. */ ! void ! grow_vswap(unsigned long vswap_new_num_entries) { struct vswap_address ** new_vswap_address; unsigned int i, failed_alloc = 0; if (vswap_last_used >= vswap_new_num_entries - 1) BUG(); --- 351,376 ---- * new ones), updating some control variables to conclude. */ ! static void ! 
grow_vswap(void) { struct vswap_address ** new_vswap_address; unsigned int i, failed_alloc = 0; + unsigned long vswap_new_num_entries = NUM_VSWAP_ENTRIES; + + if (!vswap_address) + return; + + /* using vswap_last_used instead of vswap_current_num_entries + * forces us to grow the cache even if we started shrinking + * it, but one set comp cache to the original size */ + if (vswap_last_used >= 0.90 * (NUM_VSWAP_ENTRIES - 1)) + return; + + if (down_trylock(&vswap_resize_semaphore)) + return; + #if 0 + printk("growing\n"); + #endif + if (vswap_last_used >= vswap_new_num_entries - 1) BUG(); *************** *** 315,319 **** if (!new_vswap_address) { vswap_failed_alloc = 1; ! return; } --- 383,387 ---- if (!new_vswap_address) { vswap_failed_alloc = 1; ! goto out; } *************** *** 359,363 **** vswap_last_used = vswap_new_num_entries - 1; vswap_failed_alloc = 0; ! return; fix_old_vswap: --- 427,431 ---- vswap_last_used = vswap_new_num_entries - 1; vswap_failed_alloc = 0; ! goto out; fix_old_vswap: *************** *** 378,385 **** last_vswap_allocated = vswap_new_num_entries - 1; vswap_last_used = vswap_current_num_entries - 1; } ! static inline int ! fragment_hash_needs_to_shrink(void) { unsigned long new_fragment_hash_size = (3 * num_comp_pages) * sizeof(struct comp_cache_fragment *); --- 446,455 ---- last_vswap_allocated = vswap_new_num_entries - 1; vswap_last_used = vswap_current_num_entries - 1; + out: + up(&vswap_resize_semaphore); } ! static inline void ! shrink_fragment_hash_table(void) { unsigned long new_fragment_hash_size = (3 * num_comp_pages) * sizeof(struct comp_cache_fragment *); *************** *** 387,414 **** * there? if they won't, no need to shrink the hash table */ if ((PAGE_SIZE << (fragment_hash_order - 1)) < new_fragment_hash_size) ! return 0; ! ! return 1; ! } ! ! static inline int ! vswap_needs_to_shrink(void) { ! if (!vswap_address) ! return 0; ! ! if (vswap_current_num_entries <= NUM_VSWAP_ENTRIES) ! return 0; ! ! /* more used entries than the new size? can't shrink */ ! if (vswap_num_used_entries >= NUM_VSWAP_ENTRIES) ! return 0; ! ! /* failed a lot in the last tries? let's wait for a while */ ! if (wait_scan) { ! wait_scan--; ! return 0; ! } ! return 1; } --- 457,463 ---- * there? if they won't, no need to shrink the hash table */ if ((PAGE_SIZE << (fragment_hash_order - 1)) < new_fragment_hash_size) ! return; ! resize_fragment_hash_table(); } *************** *** 425,444 **** } ! static inline int ! zone_wrong_watermarks_shrink(void) { ! return (zone_num_comp_pages > num_comp_pages); } int ! shrink_comp_cache(struct comp_cache_page * comp_page) { struct comp_cache_page * empty_comp_page; int retval = 0; /* if the comp_page is not empty, can't free it */ ! if (!list_empty(&(comp_page->fragments))) { UnlockPage(comp_page->page); ! goto check_shrink; } --- 474,515 ---- } ! static inline void ! shrink_zone_watermarks(void) { ! if (zone_num_comp_pages <= num_comp_pages) ! return; ! ! comp_cache_fix_watermarks(num_comp_pages); } + /*** + * shrink_comp_cache(comp_page, check_further) - given a "comp_page" + * entry, check if this page does not have fragments and if the + * compressed cache need to be shrunk. + * + * In the case we can use the comp page to shrink the cache, release + * it to the system, fixing all compressed cache data structures. + * + * @check_further: this parameter is used to distinguish between two + * cases where we might be shrinking the case: user input to sysctl + * entry or shrinking on demand. 
In the latter case, we want to simply + * check the comp_page and free it if possible, we don't want to + * perform an agressive shrinkage. + */ int ! shrink_comp_cache(struct comp_cache_page * comp_page, int check_further) { struct comp_cache_page * empty_comp_page; int retval = 0; + + if (!comp_page->page) + BUG(); /* if the comp_page is not empty, can't free it */ ! if (!list_empty(&(comp_page->fragments))) { UnlockPage(comp_page->page); ! if (check_further) ! goto check_shrink; ! goto out; } *************** *** 466,484 **** check_shrink: ! if (comp_cache_needs_to_shrink()) { ! if (!fragment_failed_alloc && !vswap_failed_alloc) ! goto check_empty_pages; ! } ! else { ! if (zone_wrong_watermarks_shrink()) ! comp_cache_fix_watermarks(num_comp_pages); } out: ! if (fragment_hash_needs_to_shrink()) ! resize_fragment_hash_table(); ! ! if (vswap_needs_to_shrink()) ! shrink_vswap(NUM_VSWAP_ENTRIES); return retval; --- 537,551 ---- check_shrink: ! if (!comp_cache_needs_to_shrink()) { ! shrink_zone_watermarks(); ! goto out; } + + if (!fragment_failed_alloc && !vswap_failed_alloc) + goto check_empty_pages; out: ! shrink_fragment_hash_table(); ! shrink_vswap(); return retval; *************** *** 502,549 **** } #define comp_cache_needs_to_grow() (new_num_comp_pages > num_comp_pages) ! static inline int ! fragment_hash_needs_to_grow(void) { unsigned long new_fragment_hash_size = (3 * num_comp_pages) * sizeof(struct comp_cache_fragment *); /* do we really need a bigger hash table? */ if ((PAGE_SIZE << fragment_hash_order) >= new_fragment_hash_size) ! return 0; ! ! return 1; ! } ! ! static inline int ! vswap_needs_to_grow(void) { ! if (!vswap_address) ! return 0; ! ! /* using vswap_last_used instead of vswap_current_num_entries ! * forces us to grow the cache even if we started shrinking ! * it, but one set comp cache to the original size */ ! if (vswap_last_used >= NUM_VSWAP_ENTRIES - 1) ! return 0; ! return 1; } ! static inline int ! zone_wrong_watermarks_grow(void) { ! return (zone_num_comp_pages < num_comp_pages); } ! inline void ! grow_comp_cache(zone_t * zone, int nr_pages) { struct comp_cache_page * comp_page; struct page * page; - /* we only care about the pages freed in normal zone since all - * the allocations we make are GFP_KERNEL */ - if (zone != &(zone->zone_pgdat->node_zones[ZONE_NORMAL])) - return; - while (comp_cache_needs_to_grow() && nr_pages--) { page = alloc_page(GFP_ATOMIC); --- 569,630 ---- } + #ifdef CONFIG_COMP_DEMAND_RESIZE + /*** + * shrink_on_demand(comp_page) - called by comp_cache_free(), it will + * try to shrink the compressed cache by one entry (comp_page). The + * comp_cache_free() function is called by every place that free a + * compressed cache fragment but swap out functions. + */ + int + shrink_on_demand(struct comp_cache_page * comp_page) + { + if (num_comp_pages == min_num_comp_pages) { + UnlockPage(comp_page->page); + return 0; + } + + /* to force the shrink_comp_cache() to grow the cache */ + new_num_comp_pages = num_comp_pages - 1; + + if (shrink_comp_cache(comp_page, 0)) { + #if 0 + printk("wow, it has shrunk %d\n", num_comp_pages); + #endif + return 1; + } + + new_num_comp_pages = num_comp_pages; + return 0; + } + #endif + #define comp_cache_needs_to_grow() (new_num_comp_pages > num_comp_pages) ! static inline void ! grow_fragment_hash_table(void) { unsigned long new_fragment_hash_size = (3 * num_comp_pages) * sizeof(struct comp_cache_fragment *); /* do we really need a bigger hash table? */ if ((PAGE_SIZE << fragment_hash_order) >= new_fragment_hash_size) ! 
return; ! resize_fragment_hash_table(); } ! static inline void ! grow_zone_watermarks(void) { ! if (zone_num_comp_pages >= num_comp_pages) ! return; ! ! comp_cache_fix_watermarks(num_comp_pages); } ! int ! grow_comp_cache(int nr_pages) { struct comp_cache_page * comp_page; struct page * page; while (comp_cache_needs_to_grow() && nr_pages--) { page = alloc_page(GFP_ATOMIC); *************** *** 551,555 **** /* couldn't allocate the page */ if (!page) ! return; init_comp_page(&comp_page, page); --- 632,636 ---- /* couldn't allocate the page */ if (!page) ! return 0; init_comp_page(&comp_page, page); *************** *** 563,580 **** } ! if (comp_cache_needs_to_grow()) { ! if (!fragment_failed_alloc && !vswap_failed_alloc) ! return; ! } ! else { ! if (zone_wrong_watermarks_grow()) ! comp_cache_fix_watermarks(num_comp_pages); } ! if (fragment_hash_needs_to_grow()) ! resize_fragment_hash_table(); ! ! if (vswap_needs_to_grow()) ! grow_vswap(NUM_VSWAP_ENTRIES); } --- 644,694 ---- } ! if (!comp_cache_needs_to_grow()) { ! grow_zone_watermarks(); ! goto out; } + + if (!fragment_failed_alloc && !vswap_failed_alloc) + return 1; + + out: + grow_fragment_hash_table(); + grow_vswap(); ! return 1; ! } ! ! #ifdef CONFIG_COMP_DEMAND_RESIZE ! /*** ! * grow_on_demand(void) - called by get_comp_cache_page() when it ! * cannot find space in the compressed cache. If compressed cache has ! * not yet reached the maximum size, we try to grow compressed cache ! * by one new entry. ! */ ! int ! grow_on_demand(void) ! { ! if (num_comp_pages == max_num_comp_pages) ! return 0; ! ! /* to force the grow_comp_cache() to grow the cache */ ! new_num_comp_pages = num_comp_pages + 1; ! ! if (grow_comp_cache(1)) { ! #if 0 ! printk("wow, it has grown %d\n", num_comp_pages); ! #endif ! return 1; ! } ! ! new_num_comp_pages = num_comp_pages; ! return 0; ! } ! #endif ! ! void __init ! comp_cache_adaptivity_init(void) ! { ! init_MUTEX(&vswap_resize_semaphore); } Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.32 retrieving revision 1.33 diff -C2 -r1.32 -r1.33 *** free.c 19 Jun 2002 12:18:44 -0000 1.32 --- free.c 25 Jun 2002 14:34:07 -0000 1.33 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-06-19 08:46:13 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-06-24 18:13:13 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 166,176 **** comp_cache_free(struct comp_cache_fragment * fragment) { struct comp_cache_page * comp_page; ! int locked; if (!fragment) BUG(); ! comp_page = fragment->comp_page; ! locked = !TryLockPage(comp_page->page); comp_cache_free_locked(fragment); --- 166,180 ---- comp_cache_free(struct comp_cache_fragment * fragment) { struct comp_cache_page * comp_page; ! struct page * page; ! int locked = 0; if (!fragment) BUG(); ! comp_page = fragment->comp_page; ! if (comp_page->page) { ! locked = !TryLockPage(comp_page->page); ! page = comp_page->page; ! } comp_cache_free_locked(fragment); *************** *** 179,184 **** * page will be unlocked in shrink_comp_cache() * function */ ! if (locked) ! shrink_comp_cache(comp_page); } --- 183,193 ---- * page will be unlocked in shrink_comp_cache() * function */ ! if (locked) { ! #ifdef CONFIG_COMP_DEMAND_RESIZE ! shrink_on_demand(comp_page); ! #else ! shrink_comp_cache(comp_page, 1); ! #endif ! 
} } *************** *** 232,235 **** --- 241,246 ---- return 0; + fragment = vswap->fragment; + /* set old virtual addressed ptes to the real swap entry */ if (!set_pte_list_to_entry(vswap->pte_list, old_entry, entry)) *************** *** 244,248 **** swap_duplicate(entry); - fragment = vswap->fragment; remove_fragment_vswap(fragment); remove_fragment_from_hash_table(fragment); --- 255,258 ---- Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.47 retrieving revision 1.48 diff -C2 -r1.47 -r1.48 *** main.c 20 Jun 2002 14:28:49 -0000 1.47 --- main.c 25 Jun 2002 14:34:07 -0000 1.48 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-06-20 11:01:24 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-06-24 18:14:59 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 178,182 **** #endif ! comp_cache_free_locked(fragment); PageClearMappedCompCache(old_page); --- 178,182 ---- #endif ! comp_cache_free(fragment); PageClearMappedCompCache(old_page); *************** *** 264,267 **** --- 264,268 ---- extern void __init comp_cache_swp_buffer_init(void); extern void __init comp_cache_vswap_init(void); + extern void __init comp_cache_adaptivity_init(void); LIST_HEAD(lru_queue); *************** *** 285,290 **** int i; ! max_num_comp_pages = num_physpages * 0.5; min_num_comp_pages = num_physpages * 0.05; if (!init_num_comp_pages || init_num_comp_pages < min_num_comp_pages || init_num_comp_pages > max_num_comp_pages) --- 286,297 ---- int i; ! #ifdef CONFIG_COMP_DEMAND_RESIZE ! min_num_comp_pages = 48; ! #else min_num_comp_pages = num_physpages * 0.05; + #endif + + if (!max_num_comp_pages || max_num_comp_pages < min_num_comp_pages || max_num_comp_pages > num_physpages * 0.5) + max_num_comp_pages = num_physpages * 0.5; if (!init_num_comp_pages || init_num_comp_pages < min_num_comp_pages || init_num_comp_pages > max_num_comp_pages) *************** *** 319,332 **** /* initialize our algorithms statistics array */ comp_cache_algorithms_init(); } static int __init comp_cache_size(char *str) { char * endp; - unsigned long long comp_cache_size; /* size in bytes */ ! comp_cache_size = memparse(str, &endp); ! init_num_comp_pages = comp_cache_size >> PAGE_SHIFT; ! return 1; } --- 326,343 ---- /* initialize our algorithms statistics array */ comp_cache_algorithms_init(); + + comp_cache_adaptivity_init(); } + static int __init comp_cache_size(char *str) { char * endp; ! #ifdef CONFIG_COMP_DEMAND_RESIZE ! max_num_comp_pages = memparse(str, &endp) >> PAGE_SHIFT; ! #else ! init_num_comp_pages = memparse(str, &endp) >> PAGE_SHIFT; ! #endif return 1; } Index: swapin.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v retrieving revision 1.41 retrieving revision 1.42 diff -C2 -r1.41 -r1.42 *** swapin.c 20 Jun 2002 14:28:50 -0000 1.41 --- swapin.c 25 Jun 2002 14:34:08 -0000 1.42 *************** *** 2,6 **** * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-06-20 11:00:45 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-06-22 15:19:52 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 120,124 **** __set_page_dirty(page); ! comp_cache_free_locked(fragment); #endif --- 120,124 ---- __set_page_dirty(page); ! 
comp_cache_free(fragment); #endif *************** *** 223,229 **** if (TryLockPage(fragment->comp_page->page)) BUG(); - decompress_fragment(fragment, page); ! comp_cache_free_locked(fragment); PageClearCompCache(page); --- 223,230 ---- if (TryLockPage(fragment->comp_page->page)) BUG(); decompress_fragment(fragment, page); ! UnlockPage(fragment->comp_page->page); ! ! comp_cache_free(fragment); PageClearCompCache(page); *************** *** 231,235 **** page_cache_release(page); - UnlockPage(fragment->comp_page->page); UnlockPage(page); return; --- 232,235 ---- Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.52 retrieving revision 1.53 diff -C2 -r1.52 -r1.53 *** swapout.c 19 Jun 2002 18:10:20 -0000 1.52 --- swapout.c 25 Jun 2002 14:34:08 -0000 1.53 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-06-19 11:34:35 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-06-22 14:55:33 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 446,451 **** --- 446,465 ---- } + /*** + * We couldn't find a comp page with enough free + * space, so let's first check if we are supposed and + * are able to grow the compressed cache on demand + */ + if (grow_on_demand()) + continue; + UnlockPage(page); + /*** + * We didn't grow the compressed cache, thus it's time + * to check if we able to free any fragment which was + * waiting for IO completion. If we can't free any + * fragment, it's time to write out some fragments. + */ if (!refill_swp_buffer(gfp_mask, 1, priority)) writeout_fragments(gfp_mask, priority--); Index: vswap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v retrieving revision 1.37 retrieving revision 1.38 diff -C2 -r1.37 -r1.38 *** vswap.c 20 Jun 2002 12:33:58 -0000 1.37 --- vswap.c 25 Jun 2002 14:34:08 -0000 1.38 *************** *** 2,6 **** * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-06-20 09:04:04 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-06-24 18:24:11 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 59,62 **** --- 59,109 ---- unsigned short last_page = 0; + unsigned long nr_free_vswap = 0, nr_used_vswap = 0; + + /*** + * Lock this vswap entry since it has a new page being allocated. That + * avoids this entry to be moved either when vswap is shrunk or to + * gain a new real swap entry. This sort of vswap entry does not have + * a swap cache page, so this is the field used to set this flag. + */ + inline void + set_vswap_allocating(swp_entry_t entry) + { + unsigned long offset = SWP_OFFSET(entry); + struct vswap_address * vswap; + + if (!vswap_address(entry)) + return; + if (offset >= vswap_current_num_entries) + BUG(); + vswap = vswap_address[offset]; + + if (vswap->swap_cache_page) + BUG(); + + vswap->swap_cache_page = VSWAP_ALLOCATING; + } + + /*** + * Clear the allocating flag of this vswap entry. 
+ */ + inline void + clear_vswap_allocating(swp_entry_t entry) + { + unsigned long offset = SWP_OFFSET(entry); + struct vswap_address * vswap; + + if (!vswap_address(entry)) + return; + if (offset >= vswap_current_num_entries) + BUG(); + vswap = vswap_address[offset]; + + if (vswap->swap_cache_page != VSWAP_ALLOCATING) + BUG(); + + vswap->swap_cache_page = NULL; + } + static int comp_cache_vswap_alloc(void) *************** *** 120,124 **** if (list_empty(&vswap_address_free_head)) { /* have all vswap addresses already been allocated? */ ! if (last_vswap_allocated == NUM_VSWAP_ENTRIES - 1) return 0; --- 167,171 ---- if (list_empty(&vswap_address_free_head)) { /* have all vswap addresses already been allocated? */ ! if (last_vswap_allocated == vswap_current_num_entries - 1) return 0; *************** *** 130,134 **** return 0; ! for (i = last_vswap_allocated + 1; i < NUM_VSWAP_ENTRIES && vswap_address[i]; i++); last_vswap_allocated = i - 1; --- 177,181 ---- return 0; ! for (i = last_vswap_allocated + 1; i < vswap_current_num_entries && vswap_address[i]; i++); last_vswap_allocated = i - 1; *************** *** 199,202 **** --- 246,250 ---- vswap = list_entry(vswap_address_free_head.next, struct vswap_address, list); list_del_init(vswap_address_free_head.next); + nr_free_vswap--; type = COMP_CACHE_SWP_TYPE; *************** *** 275,279 **** struct comp_cache_fragment * fragment; struct vswap_address * vswap; - struct page * page; if (!vswap_address(entry)) --- 323,326 ---- *************** *** 295,299 **** if (--count) { vswap->count = count; ! goto out; } --- 342,346 ---- if (--count) { vswap->count = count; ! return count; } *************** *** 310,314 **** vswap->swap_cache_page = NULL; ! /* if this entry is reserved, it's not in any list (either * because it has never had a fragment or the fragment has * already been remove in remove_fragment_vswap()), so we can --- 357,361 ---- vswap->swap_cache_page = NULL; ! /* if this entry is reserved, it's not on any list (either * because it has never had a fragment or the fragment has * already been remove in remove_fragment_vswap()), so we can *************** *** 316,320 **** if (fragment == VSWAP_RESERVED) { vswap_num_reserved_entries--; ! goto add_to_free_list; } --- 363,371 ---- if (fragment == VSWAP_RESERVED) { vswap_num_reserved_entries--; ! vswap->fragment = NULL; ! list_add(&(vswap->list), &vswap_address_free_head); ! nr_free_vswap++; ! ! return 0; } *************** *** 322,338 **** BUG(); ! page = fragment->comp_page->page; ! if (TryLockPage(page)) ! BUG(); ! comp_cache_free_locked(fragment); ! UnlockPage(page); ! ! vswap_num_reserved_entries--; ! add_to_free_list: ! vswap->fragment = NULL; list_add(&(vswap->list), &vswap_address_free_head); ! out: ! return count; } --- 373,391 ---- BUG(); ! /* remove from used list */ ! list_del_init(&(vswap_address[offset]->list)); ! nr_used_vswap--; ! vswap->fragment = VSWAP_FREEING; ! comp_cache_freeable_space += fragment->compressed_size; ! ! comp_cache_free(fragment); ! ! /* add to to the free list */ list_add(&(vswap->list), &vswap_address_free_head); ! nr_free_vswap++; ! ! vswap->fragment = NULL; ! return 0; } *************** *** 386,393 **** offset = SWP_OFFSET(entry); if (reserved(offset) || !vswap_address[offset]->fragment) BUG(); ! 
vswap_address[offset]->fragment = VSWAP_RESERVED; --- 439,451 ---- offset = SWP_OFFSET(entry); + + /* if we are freeing this vswap, don't have to worry since it + * will be handled by comp_cache_swp_free() function */ + if (freeing(offset)) + return; if (reserved(offset) || !vswap_address[offset]->fragment) BUG(); ! vswap_address[offset]->fragment = VSWAP_RESERVED; *************** *** 396,399 **** --- 454,458 ---- * address */ list_del_init(&(vswap_address[offset]->list)); + nr_used_vswap--; comp_cache_freeable_space += fragment->compressed_size; *************** *** 439,442 **** --- 498,502 ---- list_add(&(vswap_address[offset]->list), &vswap_address_used_head); + nr_used_vswap++; comp_cache_freeable_space -= fragment->compressed_size; *************** *** 642,646 **** offset = SWP_OFFSET(entry); ! if (vswap_address[offset]->swap_cache_page) BUG(); --- 702,707 ---- offset = SWP_OFFSET(entry); ! if (vswap_address[offset]->swap_cache_page && ! vswap_address[offset]->swap_cache_page != VSWAP_ALLOCATING) BUG(); *************** *** 706,709 **** --- 767,771 ---- list_add(&(vswap_address[offset]->list), &vswap_address_free_head); + nr_free_vswap++; return 1; } |
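For readers following the log message rather than the diff itself, below is a minimal userspace sketch of the resize-on-demand policy described above: the cache grows one page at a time up to a user-defined maximum before it starts writing fragments out, and releases pages back to the system as entries become empty. This is an illustration only, under assumed names -- cache_t, cache_grow_on_demand(), cache_shrink_on_demand(), cache_store() and cache_release() are all made up for this example and are not the patch's API; the real logic lives in grow_on_demand()/shrink_on_demand() in mm/comp_cache/adaptivity.c.

/*
 * Illustrative userspace model of the on-demand resizing policy.
 * Not the kernel code: page allocation/freeing is reduced to a
 * counter, and eviction is reduced to dropping one fragment.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct cache {
	size_t num_pages;	/* current size, in pages */
	size_t min_pages;	/* never shrink below this */
	size_t max_pages;	/* user-defined upper bound */
	size_t used_pages;	/* pages holding live fragments */
} cache_t;

/* Called when a store finds no free space: try to add one page. */
static int cache_grow_on_demand(cache_t *c)
{
	if (c->num_pages >= c->max_pages)
		return 0;	/* at the limit: caller must evict instead */
	c->num_pages++;		/* models a successful alloc_page() */
	return 1;
}

/* Called when a page becomes empty: give it back to the system. */
static int cache_shrink_on_demand(cache_t *c)
{
	if (c->num_pages <= c->min_pages)
		return 0;	/* keep a minimum working size */
	c->num_pages--;		/* models __free_page() */
	return 1;
}

/* Store one fragment: grow first, evict only once at the maximum. */
static void cache_store(cache_t *c)
{
	if (c->used_pages == c->num_pages && !cache_grow_on_demand(c))
		c->used_pages--;	/* at max size: write one fragment out */
	c->used_pages++;
}

/* Drop one fragment and shrink if that left a page empty. */
static void cache_release(cache_t *c)
{
	if (c->used_pages == 0)
		return;
	c->used_pages--;
	if (c->used_pages < c->num_pages)
		cache_shrink_on_demand(c);
}

int main(void)
{
	cache_t c = { .num_pages = 4, .min_pages = 4,
		      .max_pages = 16, .used_pages = 0 };
	int i;

	for (i = 0; i < 32; i++)
		cache_store(&c);
	printf("after stores:   %zu/%zu pages used\n", c.used_pages, c.num_pages);

	for (i = 0; i < 32; i++)
		cache_release(&c);
	printf("after releases: %zu/%zu pages used\n", c.used_pages, c.num_pages);
	return 0;
}

Built with any C99 compiler, the first printf reports 16/16 pages used (the cache grew to its maximum before evicting anything) and the second reports 0/4 (it shrank back to the minimum once the fragments were released), which is the behaviour the log message describes for the kernel cache.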