[lc-checkins] CVS: linux/mm/comp_cache Makefile,1.8,1.9 adaptivity.c,1.40,1.41 aux.c,1.43,1.44 free.
From: Rodrigo S. de C. <rc...@us...> - 2002-11-22 16:01:53
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory sc8-pr-cvs1:/tmp/cvs-serv13256/mm/comp_cache

Modified Files:
	Makefile adaptivity.c aux.c free.c main.c minilzo.c proc.c
	swapin.c swapout.c
Log Message:
Features

o New clean page adaptability. This policy disables compression of
  clean pages when it is not worth it (i.e., when most clean pages are
  compressed and then freed without ever being read back).
o Two new configuration options to disable the whole adaptability
  policy and the clean page adaptability separately. These were mostly
  used for tests, but they might be useful for anyone for whom
  compressed caching is not performing well.

Bug Fixes

o Make the LZO code compile on Athlon systems.
o __read_comp_cache(): if a dirty fragment was supposed to be freed,
  it would not actually be freed, because we forgot to drop a
  reference on the fragment.

Cleanups

o Lots, mainly in adaptivity.c.
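Before the diffs, a minimal, self-contained sketch of the accounting behind the clean page policy; all names below are simplified stand-ins for the state kept in adaptivity.c, proc.c, swapin.c and swapout.c, so treat it as an illustration, not the patch itself:

	/*
	 * Sketch of the clean page policy's accounting (hypothetical,
	 * simplified names).  clean_page_compress_lock == 1 means
	 * "clean pages currently bypass compression".
	 */
	static int clean_page_compress_lock;	/* 0 = compress clean pages */
	static unsigned long nr_hits;		/* tracked clean pages read back */
	static unsigned long hash_size = 1024;	/* entries in the tracking hash */

	/* A tracked (uncompressed) clean page was read again: a lost hit. */
	static void clean_page_was_reused(void)
	{
		/* enough hits (10% of the table): compressing them pays off */
		if (++nr_hits * 10 > hash_size) {
			clean_page_compress_lock = 0;
			nr_hits = 0;
		}
	}

	/* A compressed clean fragment is freed without ever being read. */
	static void clean_fragment_freed_unread(int *state, long threshold)
	{
		if (clean_page_compress_lock)
			return;
		/* too many wasted compressions: lock clean pages out again */
		if (--(*state) < threshold) {
			clean_page_compress_lock = 1;
			*state = 0;
		}
	}

The two halves correspond to hit_clean_page() in adaptivity.c (re-enable on hits) and the clean fragment policy in swapout.c (disable when clean fragments die unread).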
Index: Makefile
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/Makefile,v
retrieving revision 1.8
retrieving revision 1.9
diff -C2 -r1.8 -r1.9
*** Makefile	29 May 2002 21:28:54 -0000	1.8
--- Makefile	22 Nov 2002 16:01:36 -0000	1.9
***************
*** 7,11 ****
  export-objs := swapin.o
  
! obj-y := main.o vswap.o free.o swapout.o swapin.o adaptivity.o aux.o proc.o WK4x4.o WKdm.o minilzo.o
  
  include $(TOPDIR)/Rules.make
--- 7,15 ----
  export-objs := swapin.o
  
! obj-y := main.o vswap.o free.o swapout.o swapin.o aux.o proc.o WK4x4.o WKdm.o minilzo.o
! 
! ifneq ($(CONFIG_COMP_DIS_ADAPT),y)
! obj-y += adaptivity.o
! endif
  
  include $(TOPDIR)/Rules.make

Index: adaptivity.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v
retrieving revision 1.40
retrieving revision 1.41
diff -C2 -r1.40 -r1.41
*** adaptivity.c	10 Sep 2002 16:43:20 -0000	1.40
--- adaptivity.c	22 Nov 2002 16:01:36 -0000	1.41
***************
*** 2,6 ****
   * linux/mm/comp_cache/adaptivity.c
   *
!  * Time-stamp: <2002-09-02 18:43:33 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
--- 2,6 ----
   * linux/mm/comp_cache/adaptivity.c
   *
!  * Time-stamp: <2002-11-21 17:28:13 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
***************
*** 17,22 ****
  extern kmem_cache_t * comp_cachep;
  static int fragment_failed_alloc = 0, vswap_failed_alloc = 0;
! unsigned long failed_comp_page_allocs = 0;
! int growing_lock = 0;
  
  /* semaphore used to avoid two concurrent instances of
--- 17,34 ----
  extern kmem_cache_t * comp_cachep;
  static int fragment_failed_alloc = 0, vswap_failed_alloc = 0;
! int growth_lock = 0;
! 
! /* clean page hash */
! kmem_cache_t * clean_page_cachep;
! struct clean_page_data ** clean_page_hash;
! unsigned long clean_page_hash_size;
! unsigned int clean_page_hash_bits;
! 
! /* clean page list */
! LIST_HEAD(clean_page_list);
! unsigned long nr_clean_page_hash = 0;
! 
! unsigned long nr_clean_page_hits = 0;
! unsigned long old_nr_clean_page_hits = 0;
  
  /* semaphore used to avoid two concurrent instances of
***************
*** 24,29 ****
  static struct semaphore vswap_resize_semaphore;
  
- extern void comp_cache_fix_watermarks(int);
- 
  void
  resize_fragment_hash_table(void)
  {
--- 36,39 ----
***************
*** 466,481 ****
  }
  
- static inline int
- comp_cache_needs_to_shrink(void) {
- 	/* obvious condition */
- 	if (new_num_comp_pages >= num_comp_pages)
- 		return 0;
- 
- 	if (vswap_num_reserved_entries > new_num_comp_pages)
- 		return 0;
- 
- 	return 1;
- }
- 
  static inline void
  shrink_zone_watermarks(void)
--- 476,479 ----
***************
*** 488,510 ****
  /***
!  * shrink_comp_cache(comp_page, check_further) - given a "comp_page"
!  * entry, check if this page does not have fragments and if the
!  * compressed cache need to be shrunk.
!  *
!  * In the case we can use the comp page to shrink the cache, release
!  * it to the system, fixing all compressed cache data structures.
!  *
!  * @check_further: this parameter is used to distinguish between two
!  * cases where we might be shrinking the case: user input to sysctl
!  * entry or shrinking on demand. In the latter case, we want to simply
!  * check the comp_page and free it if possible, we don't want to
!  * perform an agressive shrinkage.
   *
   * caller must hold comp_cache_lock lock
   */
  static int
! shrink_comp_cache(struct comp_cache_page * comp_page, int check_further)
  {
- 	struct comp_cache_page * empty_comp_page;
  	int retval = 0;
--- 486,499 ----
  /***
!  * shrink_comp_cache(comp_page) - given a "comp_page" entry, check if
!  * this page does not have fragments, trying to release it to the
!  * system in this case. After the page is released, all the compressed
!  * cache data structures must be fixed accordingly.
   *
   * caller must hold comp_cache_lock lock
   */
  static int
! shrink_comp_cache(struct comp_cache_page * comp_page)
  {
  	int retval = 0;
***************
*** 517,581 ****
  	if (!list_empty(&(comp_page->fragments))) {
  		UnlockPage(comp_page->page);
- 		if (check_further)
- 			goto check_shrink;
- 		goto out;
- 	}
- 
- 	/* no need to shrink the cache */
- 	if (!comp_cache_needs_to_shrink()) {
- 		UnlockPage(comp_page->page);
  		goto out;
  	}
  
! 	/* we need to shrink and have a empty page, so let's do it */
! 	empty_comp_page = comp_page;
  	retval = 1;
  
! shrink:
! 	remove_comp_page_from_hash_table(empty_comp_page);
! 
! 	if (page_count(empty_comp_page->page) != 1)
  		BUG();
! 
! 	UnlockPage(empty_comp_page->page);
! 	__free_pages(empty_comp_page->page, COMP_PAGE_ORDER);
! 	set_comp_page(empty_comp_page, NULL);
! 	kmem_cache_free(comp_cachep, (empty_comp_page));
  	num_comp_pages--;
- 
- #if 0
- 	printk("shrink new %lu real %lu\n", new_num_comp_pages, num_comp_pages);
- #endif
- 
- check_shrink:
- 	if (!comp_cache_needs_to_shrink()) {
- 		shrink_zone_watermarks();
- 		goto out;
- 	}
- 
- 	if (!fragment_failed_alloc && !vswap_failed_alloc)
- 		goto check_empty_pages;
  
  out:
  	shrink_fragment_hash_table();
  	shrink_vswap();
- out_unlock:
  	spin_unlock(&comp_cache_lock);
  	return retval;
- 
- check_empty_pages:
- 	/* let's look for empty compressed cache entries */
- 	empty_comp_page = search_comp_page(free_space_hash, PAGE_SIZE);
- 
- 	if (!empty_comp_page || !empty_comp_page->page)
- 		goto out_unlock;
- 
- 	lock_page(empty_comp_page->page);
- 
- 	/* we raced */
- 	if (!list_empty(&(comp_page->fragments))) {
- 		UnlockPage(empty_comp_page->page);
- 		goto out_unlock;
- 	}
- 
- 	goto shrink;
  }
--- 506,533 ----
  	if (!list_empty(&(comp_page->fragments))) {
  		UnlockPage(comp_page->page);
  		goto out;
  	}
  
! 	/* we have an empty page, so let's do it */
  	retval = 1;
  
! 	remove_comp_page_from_hash_table(comp_page);
! 
! 	if (page_count(comp_page->page) != 1)
  		BUG();
! 
! 	UnlockPage(comp_page->page);
! 	__free_pages(comp_page->page, COMP_PAGE_ORDER);
! 	set_comp_page(comp_page, NULL);
! 	kmem_cache_free(comp_cachep, comp_page);
  	num_comp_pages--;
  
+ 	/* only change the zone watermarks if we shrunk the cache */
+ 	shrink_zone_watermarks();
  out:
  	shrink_fragment_hash_table();
  	shrink_vswap();
  	spin_unlock(&comp_cache_lock);
  	return retval;
  }
***************
*** 589,592 ****
--- 541,545 ----
  shrink_on_demand(struct comp_cache_page * comp_page)
  {
+ 	/* don't shrink a comp cache that has reached the min size */
  	if (num_comp_pages == min_num_comp_pages) {
  		UnlockPage(comp_page->page);
***************
*** 594,613 ****
  	}
  
! 	/* to force the shrink_comp_cache() to grow the cache */
! 	new_num_comp_pages = num_comp_pages - 1;
! 
! 	if (shrink_comp_cache(comp_page, 0)) {
! #if 0
! 		printk("wow, it has shrunk %d\n", num_comp_pages);
! #endif
  		return 1;
- 	}
- 
- 	new_num_comp_pages = num_comp_pages;
  	return 0;
  }
  
- #define comp_cache_needs_to_grow() (new_num_comp_pages > num_comp_pages)
- 
  static inline void
  grow_fragment_hash_table(void)
  {
--- 547,555 ----
  	}
  
! 	if (shrink_comp_cache(comp_page))
  		return 1;
  	return 0;
  }
  
  static inline void
  grow_fragment_hash_table(void)
  {
***************
*** 630,682 ****
  }
  
  static int
! grow_comp_cache(int nrpages)
  {
  	struct comp_cache_page * comp_page;
  	struct page * page;
! 	int ret = 0;
  
  	spin_lock(&comp_cache_lock);
  
! 	while (comp_cache_needs_to_grow() && nrpages--) {
! 		page = alloc_pages(GFP_ATOMIC, COMP_PAGE_ORDER);
! 
! 		/* couldn't allocate the page */
! 		if (!page) {
! 			failed_comp_page_allocs++;
! 			goto out_unlock;
! 		}
! 
! 		if (!init_comp_page(&comp_page, page)) {
! 			__free_pages(page, COMP_PAGE_ORDER);
! 			goto out_unlock;
! 		}
! 
! 		comp_cache_freeable_space += COMP_PAGE_SIZE;
! 		comp_cache_free_space += COMP_PAGE_SIZE;
! 		num_comp_pages++;
! 		if (num_comp_pages > max_used_num_comp_pages)
! 			max_used_num_comp_pages = num_comp_pages;
! #if 0
! 		printk("grow real %lu\n", num_comp_pages);
! #endif
! 	}
! 
! 	ret = 1;
! 	if (!comp_cache_needs_to_grow()) {
! 		grow_zone_watermarks();
! 		goto grow_structures;
  	}
! 
! 	if (!fragment_failed_alloc && !vswap_failed_alloc)
  		goto out_unlock;
! 
  grow_structures:
  	grow_fragment_hash_table();
  	grow_vswap();
  
  out_unlock:
  	spin_unlock(&comp_cache_lock);
! 	return ret;
  }
--- 572,617 ----
  }
  
+ /***
+  * grow_comp_cache(void) - try to allocate a compressed cache page
+  * (may be 1 or 2 memory pages). If it is successful, initialize it,
+  * adding to the compressed cache.
+  */
  static int
! grow_comp_cache(void)
  {
  	struct comp_cache_page * comp_page;
  	struct page * page;
! 	int retval = 0;
  
  	spin_lock(&comp_cache_lock);
  
! 	page = alloc_pages(GFP_ATOMIC, COMP_PAGE_ORDER);
! 
! 	/* couldn't allocate the page */
! 	if (!page) {
! 		failed_comp_page_allocs++;
! 		goto out_unlock;
  	}
! 
! 	if (!init_comp_page(&comp_page, page)) {
! 		__free_pages(page, COMP_PAGE_ORDER);
  		goto out_unlock;
+ 	}
+ 
+ 	retval = 1;
+ 
+ 	comp_cache_freeable_space += COMP_PAGE_SIZE;
+ 	comp_cache_free_space += COMP_PAGE_SIZE;
  	num_comp_pages++;
! 	if (num_comp_pages > max_used_num_comp_pages)
! 		max_used_num_comp_pages = num_comp_pages;
! 
! 	grow_zone_watermarks();
  	grow_fragment_hash_table();
  	grow_vswap();
  
  out_unlock:
  	spin_unlock(&comp_cache_lock);
! 	return retval;
  }
***************
*** 690,721 ****
  grow_on_demand(void)
  {
  	if (num_comp_pages == max_num_comp_pages)
  		return 0;
  
! 	if (growing_lock)
  		return 0;
  
! 	/* to force the grow_comp_cache() to grow the cache */
! 	new_num_comp_pages = num_comp_pages + 1;
! 
! 	if (grow_comp_cache(1)) {
! #if 0
! 		printk("wow, it has grown %d\n", num_comp_pages);
! #endif
  		return 1;
! 	}
! 
! 	new_num_comp_pages = num_comp_pages;
  	return 0;
  }
  
  void
  compact_comp_cache(void)
  {
! 	struct comp_cache_page * comp_page, * previous_comp_page = NULL, * new_comp_page, ** hash_table = free_space_hash;
  	struct comp_cache_fragment * fragment, * new_fragment;
! 	int i;
  
  next_fragment:
  	i = free_space_hash_size - 1;
  
  	do {
--- 625,655 ----
  grow_on_demand(void)
  {
+ 	/* don't grow a comp cache that has reached the max size */
  	if (num_comp_pages == max_num_comp_pages)
  		return 0;
  
! 	/* if adaptability policy locked the growth, return */
! 	if (growth_lock)
  		return 0;
  
! 	if (grow_comp_cache())
  		return 1;
! 
  	return 0;
  }
  
+ #define writeout_one_fragment(gfp_mask) writeout_fragments(gfp_mask, 1, 6)
+ 
  void
  compact_comp_cache(void)
  {
! 	struct comp_cache_page * comp_page, * previous_comp_page = NULL, * new_comp_page, ** hash_table;
  	struct comp_cache_fragment * fragment, * new_fragment;
! 	struct list_head * fragment_lh;
! 	int i, fail;
  
  next_fragment:
+ 	hash_table = free_space_hash;
+ 
  	i = free_space_hash_size - 1;
  
  	do {
***************
*** 734,740 ****
  	}
  
! 	fragment = list_entry(comp_page->fragments.prev, struct comp_cache_fragment, list);
  
  search_again:
! 	new_comp_page = search_comp_page(free_space_hash, fragment->compressed_size);
  
  	if (new_comp_page && !TryLockPage(new_comp_page->page))
--- 668,692 ----
  	}
  
! 	fragment_lh = comp_page->fragments.prev;
! 	fail = 0;
! 	while (1) {
! 		fragment = list_entry(fragment_lh, struct comp_cache_fragment, list);
! 		if (fragment_count(fragment) != 1) {
! 			fail = 1;
! 			goto next;
! 		}
! 		if (!CompFragmentToBeFreed(fragment))
! 			break;
! 	next:
! 		fragment_lh = fragment_lh->prev;
! 		if (fragment_lh == &comp_page->fragments) {
! 			if (fail)
! 				goto out2_failed;
! 			UnlockPage(comp_page->page);
! 			return;
! 		}
! 	}
  
  search_again:
! 	new_comp_page = search_comp_page(hash_table, fragment->compressed_size);
  
  	if (new_comp_page && !TryLockPage(new_comp_page->page))
***************
*** 815,822 ****
  	UnlockPage(new_comp_page->page);
  	goto next_fragment;
- 	//return;
  
  writeout:
! 	writeout_fragments(GFP_KERNEL, 1, 6);
  	return;
--- 767,773 ----
  	UnlockPage(new_comp_page->page);
  	goto next_fragment;
  
  writeout:
! 	writeout_one_fragment(GFP_KERNEL);
  	return;
***************
*** 829,833 ****
  	UnlockPage(comp_page->page);
  	goto writeout;
- }
  
--- 780,783 ----
***************
*** 851,858 ****
--- 801,937 ----
  }
  
+ #ifndef CONFIG_COMP_DIS_CLEAN
+ void
+ hit_clean_page(struct page * page)
+ {
+ 	struct clean_page_data * clpage;
+ 
+ 	clpage = clean_page_hash[clean_page_hashfn(page->mapping, page->index)];
+ 
+ 	goto inside;
+ 
+ 	for (;;) {
+ 		clpage = clpage->next_hash;
+ inside:
+ 		if (!clpage)
+ 			return;
+ 		if (clpage->mapping != page->mapping)
+ 			continue;
+ 		if (clpage->index == page->index)
+ 			break;
+ 	}
+ 
+ 	/* mark it as hit */
+ 	clpage->mapping = NULL;
+ 	nr_clean_page_hits++;
+ 
+ 	/* if too many hits, try to store the clean pages */
+ 	if (nr_clean_page_hits * 10 > clean_page_hash_size) {
+ 		clean_page_compress_lock = 0;
+ 		old_nr_clean_page_hits += nr_clean_page_hits;
+ 		nr_clean_page_hits = 0;
+ 	}
+ }
+ 
+ void
+ add_clean_page(struct page * page)
+ {
+ 	struct clean_page_data * clpage, **old_clpage;
+ 	unsigned long hash_index;
+ 
+ 	/* allocate a new structure */
+ 	clpage = ((struct clean_page_data *) kmem_cache_alloc(clean_page_cachep, SLAB_ATOMIC));
+ 
+ 	if (unlikely(!clpage))
+ 		return;
+ 
+ 	clpage->mapping = page->mapping;
+ 	clpage->index = page->index;
+ 
+ 	/* add to hash table... */
+ 	hash_index = clean_page_hashfn(page->mapping, page->index);
+ 	old_clpage = &clean_page_hash[hash_index];
+ 
+ 	if ((clpage->next_hash = *old_clpage))
+ 		(*old_clpage)->pprev_hash = &clpage->next_hash;
+ 
+ 	*old_clpage = clpage;
+ 	clpage->pprev_hash = old_clpage;
+ 
+ 	/* and to the list */
+ 	list_add(&clpage->list, &clean_page_list);
+ 	nr_clean_page_hash++;
+ 
+ 	if (nr_clean_page_hash > clean_page_hash_size * 2) {
+ 		struct clean_page_data *next;
+ 		struct clean_page_data **pprev;
+ 
+ 		clpage = list_entry(clean_page_list.prev, struct clean_page_data, list);
+ 
+ 		/* remove from the list... */
+ 		list_del(clean_page_list.prev);
+ 
+ 		if (!clpage->mapping) {
+ 			if (old_nr_clean_page_hits)
+ 				old_nr_clean_page_hits--;
+ 			else
+ 				nr_clean_page_hits--;
+ 		}
+ 
+ 		/* and from the hash table */
+ 		next = clpage->next_hash;
+ 		pprev = clpage->pprev_hash;
+ 
+ 		if (next)
+ 			next->pprev_hash = pprev;
+ 		*pprev = next;
+ 		clpage->pprev_hash = NULL;
+ 
+ 		/* free the old structure */
+ 		kmem_cache_free(clean_page_cachep, clpage);
+ 
+ 		nr_clean_page_hash--;
+ 	}
+ 
+ 	if (num_clean_fragments * 10 > num_fragments * 3)
+ 		compact_comp_cache();
+ }
+ #endif
+ 
  void __init
  comp_cache_adaptivity_init(void)
  {
+ 	unsigned int order;
+ 
  	init_MUTEX(&vswap_resize_semaphore);
+ 
+ #ifndef CONFIG_COMP_DIS_CLEAN
+ 	/* clean pages hash table */
+ 	clean_page_hash_size = comp_page_to_page(max_num_comp_pages)/7;
+ 
+ 	for (order = 0; (PAGE_SIZE << order) < clean_page_hash_size; order++);
+ 
+ 	do {
+ 		unsigned long tmp = (PAGE_SIZE << order)/sizeof(struct clean_page_data *);
+ 
+ 		clean_page_hash_bits = 0;
+ 		while((tmp >>= 1UL) != 0UL)
+ 			clean_page_hash_bits++;
+ 
+ 		clean_page_hash = (struct clean_page_data **) __get_free_pages(GFP_ATOMIC, order);
+ 	} while(clean_page_hash == NULL && --order > 0);
+ 
+ 	clean_page_hash_size = 1 << clean_page_hash_bits;
+ 
+ 	if (!clean_page_hash)
+ 		panic("comp_cache_adaptivity_init(): couldn't allocate clean page hash table\n");
+ 
+ 	memset((void *) clean_page_hash, 0, clean_page_hash_size * sizeof(struct clean_page_data *));
+ 
+ 	clean_page_cachep = kmem_cache_create("comp_cache_clean", sizeof(struct clean_page_data), 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 
+ 	printk("Compressed Cache: adaptivity\n"
+ 	       "Compressed Cache: clean page (%lu entries = %luB)\n", clean_page_hash_size, PAGE_SIZE << order);
+ #endif
  }
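The clean page hash above uses the classic 2.4-era pprev_hash linkage. For reference, here is the unlink step of add_clean_page() reduced to a self-contained sketch; the struct is trimmed to the two link fields and the names are illustrative:

	#include <stddef.h>

	struct clean_page_data {
		struct clean_page_data *next_hash;	/* next entry in the bucket */
		struct clean_page_data **pprev_hash;	/* &prev->next_hash, or the
							 * bucket head pointer      */
	};

	static void unlink_clean_page(struct clean_page_data *clpage)
	{
		struct clean_page_data *next = clpage->next_hash;
		struct clean_page_data **pprev = clpage->pprev_hash;

		if (next)
			next->pprev_hash = pprev;	/* fix follower's back link */
		*pprev = next;				/* works even at bucket head */
		clpage->pprev_hash = NULL;
	}

Because pprev_hash points at the previous link field rather than the previous node, removal needs no bucket scan and no special case for the first entry.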
Index: aux.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v
retrieving revision 1.43
retrieving revision 1.44
diff -C2 -r1.43 -r1.44
*** aux.c	10 Sep 2002 16:43:20 -0000	1.43
--- aux.c	22 Nov 2002 16:01:36 -0000	1.44
***************
*** 2,6 ****
   * linux/mm/comp_cache/aux.c
   *
!  * Time-stamp: <2002-09-02 18:43:50 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
--- 2,6 ----
   * linux/mm/comp_cache/aux.c
   *
!  * Time-stamp: <2002-10-28 21:13:03 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
***************
*** 373,376 ****
--- 373,378 ----
  
  struct comp_cache_page *
+ FASTCALL(search_comp_page(struct comp_cache_page ** hash_table, int free_space));
+ struct comp_cache_page *
  search_comp_page(struct comp_cache_page ** hash_table, int free_space)
  {
  	struct comp_cache_page * comp_page;
***************
*** 379,412 ****
  	idx = free_space_hashfn(free_space);
  
- 	if (idx == free_space_hash_size - 1)
- 		goto check_exact_size;
- 
  	/* first of all let's try to get at once a comp page whose
  	 * free space is surely bigger than what need */
! 	i = idx + 1;
! 	do {
! 		comp_page = hash_table[i++];
! 	} while(i < free_space_hash_size && !comp_page);
! 
! 	/* couldn't find a page? let's check the pages whose free
! 	 * space is linked in our hash key entry */
! 	if (!comp_page)
! 		goto check_exact_size;
  
- 	return comp_page;
- 
- check_exact_size:
  	comp_page = hash_table[idx];
  
! 	if (hash_table == free_space_hash) {
! 		while (comp_page && comp_page->free_space < free_space)
! 			comp_page = comp_page->next_hash_fs;
! 	}
! 	else {
! 		while (comp_page && comp_page->total_free_space < free_space)
! 			comp_page = comp_page->next_hash_tfs;
! 	}
! 
  	return comp_page;
  }
--- 381,413 ----
  	idx = free_space_hashfn(free_space);
  
  	/* first of all let's try to get at once a comp page whose
  	 * free space is surely bigger than what need */
! 	for (i = idx + 1; i < free_space_hash_size; i++) {
! 		if (hash_table[i])
! 			return hash_table[i];
! 	}
  
  	comp_page = hash_table[idx];
+ 	if (hash_table == free_space_hash)
+ 		goto inside_fs;
+ 	goto inside_tfs;
  
! 	for (;;) {
! 		comp_page = comp_page->next_hash_fs;
! 	inside_fs:
! 		if (!comp_page)
! 			return NULL;
! 		if (comp_page->free_space >= free_space)
! 			return comp_page;
! 	}
! 
! 	for (;;) {
! 		comp_page = comp_page->next_hash_tfs;
! 	inside_tfs:
! 		if (!comp_page)
! 			return NULL;
! 		if (comp_page->total_free_space >= free_space)
! 			return comp_page;
! 	}
  }
***************
*** 622,626 ****
  	/* inits comp cache free space hash table */
  	free_space_interval = 100 * (COMP_PAGE_ORDER + 1);
! 	free_space_hash_size = (int) (PAGE_SIZE/100) + 2;
  
  	free_space_hash = (struct comp_cache_page **) kmalloc(free_space_hash_size * sizeof(struct comp_cache_page *), GFP_ATOMIC);
--- 623,627 ----
  	/* inits comp cache free space hash table */
  	free_space_interval = 100 * (COMP_PAGE_ORDER + 1);
! 	free_space_hash_size = (int) (COMP_PAGE_SIZE/free_space_interval) + 2;
  
  	free_space_hash = (struct comp_cache_page **) kmalloc(free_space_hash_size * sizeof(struct comp_cache_page *), GFP_ATOMIC);
***************
*** 635,639 ****
  	/* inits comp cache total free space hash table */
  	total_free_space_interval = 100 * (COMP_PAGE_ORDER + 1);
! 	total_free_space_hash_size = (int) (PAGE_SIZE/100) + 2;
  
  	total_free_space_hash = (struct comp_cache_page **) kmalloc(total_free_space_hash_size * sizeof(struct comp_cache_page *), GFP_ATOMIC);
--- 636,640 ----
  	/* inits comp cache total free space hash table */
  	total_free_space_interval = 100 * (COMP_PAGE_ORDER + 1);
! 	total_free_space_hash_size = (int) (COMP_PAGE_SIZE/free_space_interval) + 2;
  
  	total_free_space_hash = (struct comp_cache_page **) kmalloc(total_free_space_hash_size * sizeof(struct comp_cache_page *), GFP_ATOMIC);
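A condensed model of the two-phase lookup that the rewritten search_comp_page() performs; the types and names are simplified here, so treat it as a sketch rather than the kernel code:

	struct cpage {
		int free_space;
		struct cpage *next;	/* chain of pages in the same bucket */
	};

	/*
	 * need / interval picks the bucket.  Every non-empty bucket above
	 * idx holds pages with at least (idx + 1) * interval bytes free,
	 * so the first hit can be returned without any comparison; only
	 * bucket idx itself needs an exact check, which is what the two
	 * goto-threaded loops in the real function do for the fs/tfs
	 * variants.
	 */
	static struct cpage *
	search(struct cpage **hash, int nbuckets, int interval, int need)
	{
		struct cpage *p;
		int idx = need / interval, i;

		/* phase 1: any page in a higher bucket surely fits */
		for (i = idx + 1; i < nbuckets; i++)
			if (hash[i])
				return hash[i];

		/* phase 2: same bucket, compare exact free space */
		for (p = hash[idx]; p; p = p->next)
			if (p->free_space >= need)
				return p;

		return NULL;
	}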
Index: free.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v
retrieving revision 1.47
retrieving revision 1.48
diff -C2 -r1.47 -r1.48
*** free.c	10 Sep 2002 16:43:21 -0000	1.47
--- free.c	22 Nov 2002 16:01:37 -0000	1.48
***************
*** 2,6 ****
   * linux/mm/comp_cache/free.c
   *
!  * Time-stamp: <2002-08-21 17:57:52 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
--- 2,6 ----
   * linux/mm/comp_cache/free.c
   *
!  * Time-stamp: <2002-10-25 11:26:26 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
***************
*** 248,255 ****
  	comp_cache_free_locked(fragment);
  
! 	/* steal the page if we need to shrink the cache. The page
! 	 * will be unlocked in shrink_comp_cache() (even if shrinking
! 	 * on demand, shrink_on_demand() will call it anyway) */
  	shrink_on_demand(comp_page);
  }
--- 248,261 ----
  	comp_cache_free_locked(fragment);
  
! #ifdef CONFIG_COMP_DIS_ADAPT
! 	UnlockPage(comp_page->page);
! #else
! 	/* *** adaptability policy ***
! 	 *
! 	 * Release the page to the system if it doesn't have other
! 	 * fragments after the fragment above has just been freed. */
  	shrink_on_demand(comp_page);
+ #endif
  }

Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.65
retrieving revision 1.66
diff -C2 -r1.65 -r1.66
*** main.c	10 Sep 2002 20:19:06 -0000	1.65
--- main.c	22 Nov 2002 16:01:37 -0000	1.66
***************
*** 2,6 ****
   * linux/mm/comp_cache/main.c
   *
!  * Time-stamp: <2002-09-10 17:03:33 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
--- 2,6 ----
   * linux/mm/comp_cache/main.c
   *
!  * Time-stamp: <2002-10-25 08:54:11 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
***************
*** 24,33 ****
  unsigned long num_active_fragments = 0;
  unsigned long num_clean_fragments = 0;
  
! unsigned long init_num_comp_pages = 0;
! 
! unsigned long new_num_comp_pages = 0;
  unsigned long max_num_comp_pages = 0;
  unsigned long min_num_comp_pages = 0;
  unsigned long max_used_num_comp_pages = 0;
--- 24,34 ----
  unsigned long num_active_fragments = 0;
  unsigned long num_clean_fragments = 0;
+ unsigned long failed_comp_page_allocs = 0;
  
! /* maximum number of pages that the compressed cache can use */
  unsigned long max_num_comp_pages = 0;
+ /* minimum number of pages that the compressed cache can use */
  unsigned long min_num_comp_pages = 0;
+ /* maximum number of pages ever used by the compressed cache */
  unsigned long max_used_num_comp_pages = 0;
***************
*** 112,115 ****
--- 113,117 ----
  
  copy_page:
+ 	/* if the page is already compressed, we just copy it */
  	if (PageCompressed(page)) {
  		memcpy(page_address(comp_page->page) + fragment->offset, page_address(page) + comp_offset, comp_size);
***************
*** 219,224 ****
  }
  
- extern void __init comp_cache_init_fix_watermarks(int num_comp_pages);
- 
  void __init
  comp_cache_init(void)
--- 221,224 ----
***************
*** 228,245 ****
  	int i;
  
! 	max_used_num_comp_pages = init_num_comp_pages = min_num_comp_pages = page_to_comp_page(48);
  
  	if (!max_num_comp_pages || max_num_comp_pages < min_num_comp_pages || max_num_comp_pages > num_physpages * 0.5)
  		max_num_comp_pages = page_to_comp_page((unsigned long) (num_physpages * 0.5));
  
! 	new_num_comp_pages = num_comp_pages = init_num_comp_pages;
! 
! 	printk("Compressed Cache: %s\n", COMP_CACHE_VERSION);
! 	printk("Compressed Cache: maximum size\n"
! 	       "Compressed Cache: %lu pages = %luKiB\n", max_num_comp_pages, (max_num_comp_pages * COMP_PAGE_SIZE) >> 10);
  
  	/* fix zone watermarks */
! 	comp_cache_init_fix_watermarks(init_num_comp_pages);
  
  	/* create slab caches */
--- 228,256 ----
  	int i;
  
! 	printk("Compressed Cache: %s\n", COMP_CACHE_VERSION);
  
+ #ifdef CONFIG_COMP_DIS_ADAPT
+ 	/* static compressed cache */
+ 	min_num_comp_pages = page_to_comp_page(48);
+ 
  	if (!max_num_comp_pages || max_num_comp_pages < min_num_comp_pages || max_num_comp_pages > num_physpages * 0.5)
  		max_num_comp_pages = page_to_comp_page((unsigned long) (num_physpages * 0.5));
  
! 	max_used_num_comp_pages = num_comp_pages = max_num_comp_pages;
! 	printk("Compressed Cache: static size\n");
! #else
! 	/* adaptive compressed cache */
! 	max_used_num_comp_pages = min_num_comp_pages = num_comp_pages = page_to_comp_page(48);
! 
! 	if (!max_num_comp_pages || max_num_comp_pages < min_num_comp_pages || max_num_comp_pages > num_physpages * 0.5)
! 		max_num_comp_pages = page_to_comp_page((unsigned long) (num_physpages * 0.5));
! 
! 	printk("Compressed Cache: maximum size\n");
! #endif
! 	printk("Compressed Cache: %lu pages = %luKiB\n", max_num_comp_pages, (max_num_comp_pages * COMP_PAGE_SIZE) >> 10);
  
  	/* fix zone watermarks */
! 	comp_cache_fix_watermarks(num_comp_pages);
  
  	/* create slab caches */
***************
*** 266,270 ****
--- 277,283 ----
  	comp_cache_algorithms_init();
  
+ #ifndef CONFIG_COMP_DIS_ADAPT
  	comp_cache_adaptivity_init();
+ #endif
  }

Index: minilzo.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/minilzo.c,v
retrieving revision 1.3
retrieving revision 1.4
diff -C2 -r1.3 -r1.4
*** minilzo.c	10 Sep 2002 20:19:06 -0000	1.3
--- minilzo.c	22 Nov 2002 16:01:40 -0000	1.4
***************
*** 54,61 ****
  #endif
  
! #if !defined(LZO_NO_SYS_TYPES_H)
! #  include <linux/types.h>
! #endif
! //#include <stdio.h>
  
  #ifndef __LZO_CONF_H
--- 54,61 ----
  #endif
  
! /* #if !defined(LZO_NO_SYS_TYPES_H) */
! /* #  include <linux/types.h> */
! /* #endif */
! /* #include <stdio.h> */
  
  #ifndef __LZO_CONF_H
***************
*** 76,80 ****
  #if !defined(LZO_HAVE_CONFIG_H)
! #  include <linux/stddef.h>
  #  include <linux/string.h>
  #  define HAVE_MEMCMP
--- 76,80 ----
  #if !defined(LZO_HAVE_CONFIG_H)
! #  include <stddef.h>
  #  include <linux/string.h>
  #  define HAVE_MEMCMP
***************
*** 324,328 ****
  #if defined(__LZO_DOS16) || defined(__LZO_WIN16)
  //#  include <dos.h>
! #  if 1 && defined(__WATCOMC__)
  //#  include <i86.h>
  __LZO_EXTERN_C unsigned char _HShift;
--- 324,328 ----
  #if defined(__LZO_DOS16) || defined(__LZO_WIN16)
  //#  include <dos.h>
! #if 1 && defined(__WATCOMC__)
  //#  include <i86.h>
  __LZO_EXTERN_C unsigned char _HShift;
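The two initialization modes in comp_cache_init() boil down to the following; the variables mirror the project's, but the wrapper function and the page_to_comp_page() stand-in are hypothetical:

	#define page_to_comp_page(n) (n)	/* stand-in: 1 comp page per page */

	static unsigned long num_comp_pages, min_num_comp_pages;
	static unsigned long max_num_comp_pages, max_used_num_comp_pages;

	static void size_comp_cache(void)
	{
	#ifdef CONFIG_COMP_DIS_ADAPT
		/* static cache: start at, and keep, the maximum size */
		max_used_num_comp_pages = num_comp_pages = max_num_comp_pages;
	#else
		/* adaptive cache: start at the 48-page minimum, grow on demand */
		max_used_num_comp_pages = min_num_comp_pages = num_comp_pages =
			page_to_comp_page(48);
	#endif
	}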
Index: proc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/proc.c,v
retrieving revision 1.28
retrieving revision 1.29
diff -C2 -r1.28 -r1.29
*** proc.c	12 Sep 2002 15:11:31 -0000	1.28
--- proc.c	22 Nov 2002 16:01:41 -0000	1.29
***************
*** 2,6 ****
   * linux/mm/comp_cache/proc.c
   *
!  * Time-stamp: <2002-09-12 11:42:20 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
--- 2,6 ----
   * linux/mm/comp_cache/proc.c
   *
!  * Time-stamp: <2002-10-21 16:26:52 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
***************
*** 54,58 ****
  static int algorithm_min = WKDM_IDX;
  static int algorithm_max = LZO_IDX;
! static int algorithm_idx = 0;
  
  struct stats_summary * stats = &compression_algorithm.stats;
--- 54,58 ----
  static int algorithm_min = WKDM_IDX;
  static int algorithm_max = LZO_IDX;
! static int algorithm_idx = -1;
  
  struct stats_summary * stats = &compression_algorithm.stats;
***************
*** 61,65 ****
  static spinlock_t comp_data_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
  
! int clean_page_compress_lock = 1;
  
  inline void
--- 61,65 ----
  static spinlock_t comp_data_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
  
! int clean_page_compress_lock = 0;
  
  inline void
***************
*** 153,157 ****
  #if 0
! 	if (state == CLEAN_PAGE && clean_page_compress_lock) {
  		comp_size = PAGE_SIZE;
  		comp_cache_update_comp_stats(comp_size, page);
--- 153,157 ----
  #if 0
! 	if (state == CLEAN_PAGE && clean_page_compress_lock) { // && (num_clean_fragments * 5 < num_fragments)) {
  		comp_size = PAGE_SIZE;
  		comp_cache_update_comp_stats(comp_size, page);
***************
*** 261,265 ****
  comp_cache_algorithms_init(void)
  {
! 	if (!algorithm_idx || algorithm_idx < algorithm_min || algorithm_idx > algorithm_max)
  		algorithm_idx = LZO_IDX;
--- 261,265 ----
  comp_cache_algorithms_init(void)
  {
! 	if (algorithm_idx == -1 || algorithm_idx < algorithm_min || algorithm_idx > algorithm_max)
  		algorithm_idx = LZO_IDX;
***************
*** 406,410 ****
  	total1 = free_space_count(0, array_num_fragments);
  
- 	length += sprintf(page + length, "total %lu act %lu pages %lu\n", num_fragments, num_active_fragments, num_comp_pages << COMP_PAGE_ORDER);
  	length += sprintf(page + length, " %4d: %7lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n",
--- 406,409 ----

Index: swapin.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v
retrieving revision 1.54
retrieving revision 1.55
diff -C2 -r1.54 -r1.55
*** swapin.c	10 Sep 2002 16:43:24 -0000	1.54
--- swapin.c	22 Nov 2002 16:01:42 -0000	1.55
***************
*** 2,6 ****
   * linux/mm/comp_cache/swapin.c
   *
!  * Time-stamp: <2002-09-10 10:36:42 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
--- 2,6 ----
   * linux/mm/comp_cache/swapin.c
   *
!  * Time-stamp: <2002-11-21 15:23:30 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
***************
*** 18,25 ****
  #include <asm/uaccess.h>
  
! #define ACTIVE_FRAGMENT 1
  #define INACTIVE_FRAGMENT 0
  
! int last_accessed = 0, last_state_accessed = 0;
  
  int
--- 18,25 ----
  #include <asm/uaccess.h>
  
! #define ACTIVE_FRAGMENT 1
  #define INACTIVE_FRAGMENT 0
  
! int last_state_accessed = 0;
  
  int
***************
*** 60,63 ****
--- 60,64 ----
  
  	if (CompFragmentTestandClearDirty(fragment)) {
+ 		num_clean_fragments++;
  		list_del(&fragment->mapping_list);
  		list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages);
***************
*** 80,85 ****
  __read_comp_cache(struct address_space *mapping, unsigned long offset, struct page * page, int state)
  {
  	struct comp_cache_fragment * fragment;
! 	int err, ratio;
  
  	if (!PageLocked(page))
--- 81,87 ----
  __read_comp_cache(struct address_space *mapping, unsigned long offset, struct page * page, int state)
  {
+ 	static int adapt_ratio = 2, last_accessed = 0;
  	struct comp_cache_fragment * fragment;
! 	int err;
  
  	if (!PageLocked(page))
***************
*** 99,183 ****
  	get_fragment(fragment);
  
! #if 0
! 	if (CompFragmentDirty(fragment)) {
! 		//if (last_state_accessed > 0)
! 		//	last_state_accessed = -1;
! 		//else
! 		last_state_accessed--;
! 		ratio = -3; //-(((num_fragments - num_clean_fragments) * 4)/num_fragments?:0);
! 		if (last_state_accessed < ratio) {
! 			clean_page_compress_lock = 1;
! 			last_state_accessed = 0;
! 		}
! 		goto test_active;
! 	}
! 
! 	//if (last_state_accessed < 0)
! 	//	last_state_accessed = 1;
! 	//else
! 	last_state_accessed++;
! 	ratio = 3; //((num_clean_fragments * 4)/num_fragments?:0);
! 	if (last_state_accessed > ratio) {
! 		clean_page_compress_lock = 0;
! 		last_state_accessed = 0;
! 	}
! 
! test_active:
! #endif
! 
! #if 0
! 	if (!CompFragmentDirty(fragment)) {
  		last_state_accessed++;
- 		ratio = 3; //((num_clean_fragments * 4)/num_fragments?:0);
- 		if (last_state_accessed > ratio) {
- 			clean_page_compress_lock = 0;
- 			last_state_accessed = 0;
- 		}
  #endif
  
- 	if (CompFragmentActive(fragment)) {// || !CompFragmentDirty(fragment)) {
- 		if (last_accessed == ACTIVE_FRAGMENT) {
  #if 0
! 			/* -- VERSION 3 -- */
! 			if (growing_lock) {
! 				compact_comp_cache();
! 				//writeout_fragments(GFP_KERNEL, 1, SHRINKAGE_PRIORITY);
! 				last_accessed = INACTIVE_FRAGMENT;
! 				goto read;
! 			}
! 			growing_lock = 1;
! 			goto read;
  #endif
! #if 1
! 			/* -- VERSION 2 -- */
! 			if (growing_lock) {
  				compact_comp_cache();
! 				//writeout_fragments(GFP_KERNEL, 1, SHRINKAGE_PRIORITY);
! 				growing_lock = 0;
  				last_accessed = INACTIVE_FRAGMENT;
  				goto read;
  			}
! 			growing_lock = 1;
! 			goto read;
! #endif
! 
! #if 0
! 			/* -- VERSION 1 -- */
! 			writeout_fragments(GFP_KERNEL, 1, SHRINKAGE_PRIORITY);
! 			growing_lock = 1;
! 			last_accessed = INACTIVE_FRAGMENT;
  			goto read;
- #endif
  		}
  		last_accessed = ACTIVE_FRAGMENT;
- 		goto read;
- 	}
- 
- 	/* inactive fragment */
- 	growing_lock = 0;
- 	last_accessed = INACTIVE_FRAGMENT;
  
  read:
--- 101,170 ----
  	get_fragment(fragment);
  
! #ifndef CONFIG_COMP_DIS_CLEAN
! 	/* *** clean fragment policy ***
! 	 *
! 	 * All clean fragments read must account as +1 to the
! 	 * last_state_accessed variable. These fragments are only
! 	 * accounted while we are compressing clean pages
! 	 * (clean_page_compress_lock == 0). */
! 	if (!CompFragmentDirty(fragment) && !clean_page_compress_lock)
  		last_state_accessed++;
  #endif
  
  #if 0
! #ifndef CONFIG_COMP_DIS_ADAPT
! 	/* -- version 4 -- */
! 	if (CompFragmentActive(fragment)) {
! 		/* fragments from compcache active list */
! 		last_accessed++;
! 		if (last_accessed >= adapt_ratio)
! 			growth_lock = 1;
! 		if (last_accessed >= 2 * adapt_ratio) {
! 			compact_comp_cache();
! 
! 			growth_lock = 0;
! 			last_accessed = 0;
! 		}
! 	}
! 	else {
! 		/* fragments from compcache inactive list */
! 		last_accessed--;
! 		if (last_accessed <= (-1 * adapt_ratio)) {
! 			growth_lock = 0;
! 			last_accessed = 0;
! 		}
! 	}
  #endif
! #endif
! 
! #if 1
! #ifndef CONFIG_COMP_DIS_ADAPT
! 	/* -- version 2 -- */
! 	if (CompFragmentActive(fragment)) {
! 		/* fragments from compcache active list */
! 		if (last_accessed == ACTIVE_FRAGMENT) {
! 			if (growth_lock) {
  				compact_comp_cache();
! 				growth_lock = 0;
  				last_accessed = INACTIVE_FRAGMENT;
  				goto read;
  			}
! 			growth_lock = 1;
  			goto read;
  		}
  		last_accessed = ACTIVE_FRAGMENT;
+ 		/* See alair1 */
+ 		/* compact_comp_cache(); */
+ 	}
+ 	else {
+ 		/* fragments from compcache inactive list */
+ 		growth_lock = 0;
+ 		last_accessed = INACTIVE_FRAGMENT;
+ 	}
  
  read:
+ #endif
+ #endif
  	/* If only dirty fragments should be returned (when reading
  	 * the page for writing it), free the fragment and return. A
***************
*** 185,188 ****
--- 172,176 ----
  	 * is no point decompressing a clean fragment. */
  	if (CompFragmentDirty(fragment) && state == DIRTY_PAGE) {
+ 		put_fragment(fragment);
  		drop_fragment(fragment);
  		goto out_unlock;
***************
*** 197,202 ****
  	spin_lock(&comp_cache_lock);
  
! 	if (CompFragmentTestandClearDirty(fragment))
! 		__set_page_dirty(page);
  
  	UnlockPage(fragment->comp_page->page);
--- 185,192 ----
  	spin_lock(&comp_cache_lock);
  
! 	if (CompFragmentTestandClearDirty(fragment)) {
! 		num_clean_fragments++;
! 		__set_page_dirty(page);
! 	}
  
  	UnlockPage(fragment->comp_page->page);

Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.72
retrieving revision 1.73
diff -C2 -r1.72 -r1.73
*** swapout.c	12 Sep 2002 15:11:31 -0000	1.72
--- swapout.c	22 Nov 2002 16:01:45 -0000	1.73
***************
*** 2,6 ****
   * /mm/comp_cache/swapout.c
   *
!  * Time-stamp: <2002-09-12 11:42:33 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
--- 2,6 ----
   * /mm/comp_cache/swapout.c
   *
!  * Time-stamp: <2002-10-25 11:26:59 rcastro>
   *
   * Linux Virtual Memory Compressed Cache
***************
*** 361,372 ****
  	maxscan = max((int) ((num_fragments - num_active_fragments)/priority), (int) (nrpages * 2));
  
! 	if (!list_empty(&inactive_lru_queue))
! 		goto scan;
! 
! active_list:
! 	list = &active_lru_queue;
! 	maxscan = max((int) (num_active_fragments/priority), (int) (nrpages * 2));
  
- scan:
  	while (!list_empty(list) && maxscan--) {
  		fragment = list_entry(fragment_lh = list->prev, struct comp_cache_fragment, lru_queue);
--- 361,369 ----
  	maxscan = max((int) ((num_fragments - num_active_fragments)/priority), (int) (nrpages * 2));
  
! 	if (list_empty(&inactive_lru_queue)) {
! 		list = &active_lru_queue;
! 		maxscan = max((int) (num_active_fragments/priority), (int) (nrpages * 2));
! 	}
  
  	while (!list_empty(list) && maxscan--) {
  		fragment = list_entry(fragment_lh = list->prev, struct comp_cache_fragment, lru_queue);
***************
*** 381,393 ****
  		/* clean page, let's free it */
  		if (!CompFragmentDirty(fragment)) {
! #if 0
! 			//if (last_state_accessed > 0)
! 			//	last_state_accessed = -1;
! 			//else
! 			last_state_accessed--;
! 			ratio = -2; //(((num_fragments - num_clean_fragments) * 6)/num_fragments?:0);
! 			if (last_state_accessed < ratio) {
! 				clean_page_compress_lock = 1;
! 				last_state_accessed = 0;
  			}
  #endif
--- 378,399 ----
  		/* clean page, let's free it */
  		if (!CompFragmentDirty(fragment)) {
! #ifndef CONFIG_COMP_DIS_CLEAN
! 			/* *** clean fragment policy ***
! 			 *
! 			 * All clean fragments to be freed account as
! 			 * -1 to the last_state_accessed variable. These
! 			 * fragments are only accounted while we are
! 			 * compressing clean pages
! 			 * (clean_page_compress_lock == 0). */
! 			if (!clean_page_compress_lock) {
! 				last_state_accessed--;
! 				ratio = -((num_clean_fragments * 40)/num_fragments);
! 				if (ratio > -5)
! 					ratio = -5;
! 				if (last_state_accessed < ratio) {
! 					clean_page_compress_lock = 1;
! 					last_state_accessed = 0;
! 				}
  			}
  #endif
***************
*** 399,416 ****
  			goto try_again;
  		}
! 
! #if 0
! 		//if (last_state_accessed < 0)
! 		//	last_state_accessed = 1;
! 		//else
! 		last_state_accessed++;
! 		ratio = 2; //((num_clean_fragments * 6)/num_fragments?:0);
! 		if (last_state_accessed > ratio) {
! 			clean_page_compress_lock = 0;
! 			last_state_accessed = 0;
! 		}
! #endif
! 
! 		/* we can't perform IO, so we can't go on */
  		if (!(gfp_mask & __GFP_FS))
--- 405,409 ----
  			goto try_again;
  		}
! 
  		/* we can't perform IO, so we can't go on */
  		if (!(gfp_mask & __GFP_FS))
***************
*** 444,447 ****
--- 437,441 ----
  
  	CompFragmentClearDirty(fragment);
+ 	num_clean_fragments++;
  
  	writepage = fragment->mapping->a_ops->writepage;
***************
*** 480,490 ****
  	}
  
- #if 0
- 	if (nrpages) {
- 		if (list == &inactive_lru_queue && (num_active_fragments * 4 > num_fragments * 3))
- 			goto active_list;
- 	}
- #endif
- 
  	return (!nrpages);
  }
--- 474,477 ----
***************
*** 524,528 ****
--- 511,519 ----
  	page_cache_get(page);
  
+ #ifndef CONFIG_COMP_DIS_ADAPT
  	maxtry = 5;
+ #else
+ 	maxtry = 4;
+ #endif
  
  	hash_table = free_space_hash;
***************
*** 591,601 ****
  		hash_table = free_space_hash;
  
! 		/***
! 		 * We couldn't find a comp page with enough free
! 		 * space, so let's first check if we are supposed and
! 		 * are able to grow the compressed cache on demand */
  		if (grow_on_demand())
  			continue;
  
  		if (!writeout_fragments(gfp_mask, SWAP_CLUSTER_MAX, priority))
--- 582,595 ----
  		hash_table = free_space_hash;
  
! #ifndef CONFIG_COMP_DIS_ADAPT
! 		/* *** adaptability policy ***
! 		 *
! 		 * We couldn't find a comp page with enough free space
! 		 * to store the new fragment. Let's then check if we
! 		 * are able to grow the compressed cache on demand. */
  		if (grow_on_demand())
  			continue;
+ #endif
  
  		if (!writeout_fragments(gfp_mask, SWAP_CLUSTER_MAX, priority))
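To see how the clean fragment threshold in the swapout.c hunk above behaves, here is a small stand-alone example (a hypothetical driver, not part of the patch): the more clean fragments the cache holds, the more "freed unread" events it takes before clean page compression is locked out, with a floor of 5 events.

	#include <stdio.h>

	static long threshold(long num_clean_fragments, long num_fragments)
	{
		long ratio = -((num_clean_fragments * 40) / num_fragments);
		if (ratio > -5)
			ratio = -5;	/* never more sensitive than 5 events */
		return ratio;
	}

	int main(void)
	{
		/* 500 of 1000 fragments clean -> -20: lock after 20 net frees */
		printf("%ld\n", threshold(500, 1000));
		/* 50 of 1000 clean -> floor of -5: lock after only 5 */
		printf("%ld\n", threshold(50, 1000));
		return 0;
	}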