[lc-checkins] CVS: linux/mm/comp_cache adaptivity.c,1.33,1.34 aux.c,1.35,1.36 free.c,1.33,1.34 main.
From: Rodrigo S. de C. <rc...@us...> - 2002-07-11 19:08:15
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv16722/mm/comp_cache

Modified Files:
	adaptivity.c aux.c free.c main.c proc.c swapout.c
Log Message:

Feature

o New proc entry (comp_cache_frag), showing the fragmentation in the
  compressed cache.

o Every struct comp_cache_page is added to two hash tables: free space
  and total free space. The former is the old one, showing the amount
  of free space that can be used right away. The latter shows the total
  free space, i.e. it also accounts for the fragmented space. Thus, if
  there is a page whose total space is enough for a new fragment, we
  compact that page and return it to be used by the new fragment. The
  two tables are set up the same way. (A simplified sketch of the
  dual-table scheme and of the compaction step is appended after the
  diffs below.)

o Added back the feature removed in 0.23pre9 due to a bug fix. That
  feature allows pages to be compressed even when the gfp_mask does not
  allow them to be written out. It is back now, and it won't write out
  a page if the gfp_mask does not allow it. This feature allows better
  use of the compressed cache space.

Cleanup

o Removed the dirty parameter from get_comp_cache_page().

o Better descriptions of /proc/comp_cache_{hist,frag}.

Index: adaptivity.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v retrieving revision 1.33 retrieving revision 1.34 diff -C2 -r1.33 -r1.34 *** adaptivity.c 5 Jul 2002 15:21:54 -0000 1.33 --- adaptivity.c 11 Jul 2002 19:08:11 -0000 1.34 *************** *** 2,6 **** * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-07-04 13:59:28 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-07-09 16:33:37 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 554,558 **** check_empty_pages: /* let's look for empty compressed cache entries */ ! empty_comp_page = search_comp_page_free_space(PAGE_SIZE); if (!empty_comp_page || !empty_comp_page->page) --- 554,558 ---- check_empty_pages: /* let's look for empty compressed cache entries */ ! empty_comp_page = search_comp_page(free_space_hash, PAGE_SIZE); if (!empty_comp_page || !empty_comp_page->page) Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.35 retrieving revision 1.36 diff -C2 -r1.35 -r1.36 *** aux.c 1 Jul 2002 21:36:50 -0000 1.35 --- aux.c 11 Jul 2002 19:08:11 -0000 1.36 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-07-01 18:04:55 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-07-11 15:45:38 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 13,17 **** #include <linux/pagemap.h> #include <linux/init.h> ! #include <linux/vmalloc.h> struct comp_cache_fragment ** fragment_hash; --- 13,17 ---- #include <linux/pagemap.h> #include <linux/init.h> ! #include <linux/slab.h> struct comp_cache_fragment ** fragment_hash; *************** *** 21,28 **** unsigned int fragment_hash_bits; ! static struct comp_cache_page ** free_space_hash; unsigned int free_space_hash_size; unsigned int free_space_interval; /* computes (unsigned long long x) / (unsigned long long y) */ unsigned long long --- 21,32 ---- unsigned int fragment_hash_bits; !
struct comp_cache_page ** free_space_hash; unsigned int free_space_hash_size; unsigned int free_space_interval; + struct comp_cache_page ** total_free_space_hash; + unsigned int total_free_space_hash_size; + unsigned int total_free_space_interval; + /* computes (unsigned long long x) / (unsigned long long y) */ unsigned long long *************** *** 198,202 **** BUG(); ! for (comp_page = free_space_hash[index], total = 0; comp_page; comp_page = comp_page->next_hash, total++) { total_fragments = 0; --- 202,206 ---- BUG(); ! for (comp_page = free_space_hash[index], total = 0; comp_page; comp_page = comp_page->next_hash_fs, total++) { total_fragments = 0; *************** *** 251,280 **** } inline void add_comp_page_to_hash_table(struct comp_cache_page * new_comp_page) { struct comp_cache_page ** comp_page; comp_page = &free_space_hash[free_space_hashfn(new_comp_page->free_space)]; ! if ((new_comp_page->next_hash = *comp_page)) ! (*comp_page)->pprev_hash = &new_comp_page->next_hash; *comp_page = new_comp_page; ! new_comp_page->pprev_hash = comp_page; } inline void remove_comp_page_from_hash_table(struct comp_cache_page * comp_page) { ! struct comp_cache_page *next = comp_page->next_hash; ! struct comp_cache_page **pprev = comp_page->pprev_hash; if (next) ! next->pprev_hash = pprev; *pprev = next; ! comp_page->pprev_hash = NULL; } struct comp_cache_page * ! search_comp_page_free_space(int free_space) { struct comp_cache_page * comp_page; int idx, i; --- 255,339 ---- } + unsigned long + fragmentation_count(int index, unsigned long * frag_space) { + struct comp_cache_page * comp_page; + struct comp_cache_fragment * fragment; + struct list_head * fragment_lh; + unsigned long total, fragmented_space; + + if (index < 0) + BUG(); + + if (index >= free_space_hash_size) + BUG(); + + for (comp_page = free_space_hash[index], total = 0; comp_page; comp_page = comp_page->next_hash_fs, total++) { + fragmented_space = 0; + + for_each_fragment(fragment_lh, comp_page) { + fragment = list_entry(fragment_lh, struct comp_cache_fragment, list); + + if (CompFragmentFreed(fragment)) + fragmented_space += fragment->compressed_size; + } + + if (fragmented_space + comp_page->free_space != comp_page->total_free_space) + BUG(); + + frag_space[(int) fragmented_space/500]++; + } + + return total; + } + inline void add_comp_page_to_hash_table(struct comp_cache_page * new_comp_page) { struct comp_cache_page ** comp_page; + /* add to free space hash table */ comp_page = &free_space_hash[free_space_hashfn(new_comp_page->free_space)]; ! if ((new_comp_page->next_hash_fs = *comp_page)) ! (*comp_page)->pprev_hash_fs = &new_comp_page->next_hash_fs; ! ! *comp_page = new_comp_page; ! new_comp_page->pprev_hash_fs = comp_page; ! ! /* add to total free space hash table */ ! comp_page = &total_free_space_hash[free_space_hashfn(new_comp_page->total_free_space)]; ! ! if ((new_comp_page->next_hash_tfs = *comp_page)) ! (*comp_page)->pprev_hash_tfs = &new_comp_page->next_hash_tfs; *comp_page = new_comp_page; ! new_comp_page->pprev_hash_tfs = comp_page; } inline void remove_comp_page_from_hash_table(struct comp_cache_page * comp_page) { ! struct comp_cache_page *next; ! struct comp_cache_page **pprev; + /* remove from free space hash table */ + next = comp_page->next_hash_fs; + pprev = comp_page->pprev_hash_fs; + if (next) ! next->pprev_hash_fs = pprev; *pprev = next; ! comp_page->pprev_hash_fs = NULL; ! ! /* remove from total free space hash table */ ! next = comp_page->next_hash_tfs; ! pprev = comp_page->pprev_hash_tfs; ! ! if (next) ! 
next->pprev_hash_tfs = pprev; ! *pprev = next; ! comp_page->pprev_hash_tfs = NULL; } struct comp_cache_page * ! search_comp_page(struct comp_cache_page ** hash_table, int free_space) { struct comp_cache_page * comp_page; int idx, i; *************** *** 289,293 **** i = idx + 1; do { ! comp_page = free_space_hash[i++]; } while(i < free_space_hash_size && !comp_page); --- 348,352 ---- i = idx + 1; do { ! comp_page = hash_table[i++]; } while(i < free_space_hash_size && !comp_page); *************** *** 300,307 **** check_exact_size: ! comp_page = free_space_hash[idx]; ! while (comp_page && comp_page->free_space < free_space) ! comp_page = comp_page->next_hash; return comp_page; --- 359,372 ---- check_exact_size: ! comp_page = hash_table[idx]; ! if (hash_table == free_space_hash) { ! while (comp_page && comp_page->free_space < free_space) ! comp_page = comp_page->next_hash_fs; ! } ! else { ! while (comp_page && comp_page->total_free_space < free_space) ! comp_page = comp_page->next_hash_tfs; ! } return comp_page; *************** *** 526,530 **** free_space_hash_size = (int) (PAGE_SIZE/free_space_interval) + 2; ! free_space_hash = vmalloc(free_space_hash_size * sizeof(struct comp_cache_page *)); printk("Compressed Cache: free space (%u entries = %uB)\n", free_space_hash_size, free_space_hash_size * sizeof(struct comp_cache_page *)); --- 591,595 ---- free_space_hash_size = (int) (PAGE_SIZE/free_space_interval) + 2; ! free_space_hash = (struct comp_cache_page **) kmalloc(free_space_hash_size * sizeof(struct comp_cache_page *), GFP_ATOMIC); printk("Compressed Cache: free space (%u entries = %uB)\n", free_space_hash_size, free_space_hash_size * sizeof(struct comp_cache_page *)); *************** *** 534,537 **** --- 599,615 ---- memset((void *) free_space_hash, 0, free_space_hash_size * sizeof(struct comp_cache_page *)); + + /* inits comp cache total free space hash table */ + total_free_space_interval = 100 * ((float) PAGE_SIZE)/4096; + total_free_space_hash_size = (int) (PAGE_SIZE/free_space_interval) + 2; + + total_free_space_hash = (struct comp_cache_page **) kmalloc(total_free_space_hash_size * sizeof(struct comp_cache_page *), GFP_ATOMIC); + + printk("Compressed Cache: total free space (%u entries = %uB)\n", total_free_space_hash_size, total_free_space_hash_size * sizeof(struct comp_cache_page *)); + + if (!total_free_space_hash) + panic("comp_cache_hash_init(): couldn't allocate total free space hash table\n"); + + memset((void *) total_free_space_hash, 0, total_free_space_hash_size * sizeof(struct comp_cache_page *)); } Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.33 retrieving revision 1.34 diff -C2 -r1.33 -r1.34 *** free.c 25 Jun 2002 14:34:07 -0000 1.33 --- free.c 11 Jul 2002 19:08:11 -0000 1.34 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-06-24 18:13:13 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! 
* Time-stamp: <2002-07-09 16:34:26 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 75,78 **** --- 75,142 ---- num_fragments--; comp_cache_free_space += fragment->compressed_size; + + /*** + * Add the fragment compressed size only to total_free_space + * field since fragments that will be standing to be merged + * cannot be added to free_space field at this moment + */ + fragment->comp_page->total_free_space += fragment->compressed_size; + } + + void + compact_fragments(struct comp_cache_page * comp_page) + { + struct comp_cache_fragment * fragment, * min_fragment = NULL; + struct list_head * fragment_lh, * tmp_lh, aux_fragment_list; + int min_offset = PAGE_SIZE + 1, num_fragments = 0, next_offset = 0; + + INIT_LIST_HEAD(&aux_fragment_list); + + /* remove all the freed fragments */ + for_each_fragment(fragment_lh, comp_page) { + fragment = list_entry(fragment_lh, struct comp_cache_fragment, list); + + if (CompFragmentFreed(fragment)) { + list_del(&(fragment->list)); + comp_page->free_space += fragment->compressed_size; + if (!CompFragmentTestandClearIO(fragment)) + kmem_cache_free(fragment_cachep, (fragment)); + continue; + } + + /* fragment not yet freed */ + if (fragment->offset < min_offset) { + min_offset = fragment->offset; + min_fragment = fragment; + } + num_fragments++; + } + + /* compact the other fragments */ + while (num_fragments--) { + list_del(&min_fragment->list); + list_add(&min_fragment->list, &aux_fragment_list); + + memmove(page_address(comp_page->page) + next_offset, page_address(comp_page->page) + min_fragment->offset, min_fragment->compressed_size); + min_fragment->offset = next_offset; + + min_offset = PAGE_SIZE + 1; + next_offset += min_fragment->compressed_size; + + for_each_fragment(fragment_lh, comp_page) { + fragment = list_entry(fragment_lh, struct comp_cache_fragment, list); + if (fragment->offset < min_offset) { + min_offset = fragment->offset; + min_fragment = fragment; + } + } + } + + comp_page->free_offset = next_offset; + + list_for_each_safe(fragment_lh, tmp_lh, &aux_fragment_list) { + list_del(fragment_lh); + list_add(fragment_lh, &(comp_page->fragments)); + } } Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.51 retrieving revision 1.52 diff -C2 -r1.51 -r1.52 *** main.c 1 Jul 2002 21:36:50 -0000 1.51 --- main.c 11 Jul 2002 19:08:11 -0000 1.52 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-07-01 15:45:58 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-07-11 09:32:13 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 41,45 **** extern unsigned long num_physpages; ! extern struct comp_cache_page * get_comp_cache_page(struct page *, unsigned short, struct comp_cache_fragment **, int, int, unsigned int, int); inline void --- 41,45 ---- extern unsigned long num_physpages; ! extern struct comp_cache_page * get_comp_cache_page(struct page *, unsigned short, struct comp_cache_fragment **, int, unsigned int, int); inline void *************** *** 55,58 **** --- 55,64 ---- #endif if (write) { + /* if gfp_mask does not allow us to write out the + * page, unlock the page and set all the bits back */ + if (!(gfp_mask & __GFP_FS)) { + UnlockPage(page); + goto set_bits_back; + } writepage(page); return; *************** *** 65,69 **** BUG(); ! 
compress_page(page, 1, gfp_mask, priority); } --- 71,81 ---- BUG(); ! /* in the case we fail to compress the page, set the bits back ! * since that's a dirty page */ ! if (compress_page(page, 1, gfp_mask, priority)) ! return; ! set_bits_back: ! SetPageDirty(page); ! ClearPageLaunder(page); } *************** *** 109,113 **** comp_size = compress(current_compressed_page = page, buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm, dirty); ! comp_page = get_comp_cache_page(page, comp_size, &fragment, dirty, 1, gfp_mask, priority); /* if comp_page == NULL, get_comp_cache_page() gave up --- 121,125 ---- comp_size = compress(current_compressed_page = page, buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm, dirty); ! comp_page = get_comp_cache_page(page, comp_size, &fragment, 1, gfp_mask, priority); /* if comp_page == NULL, get_comp_cache_page() gave up *************** *** 124,127 **** --- 136,155 ---- set_fragment_algorithm(fragment, algorithm); + /* fix mapping stuff */ + page->mapping->nrpages++; + if (!dirty) { + list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages); + goto copy_page; + } + + CompFragmentSetDirty(fragment); + list_add(&fragment->mapping_list, &fragment->mapping->dirty_comp_pages); + + /* the inode might have been synced in the meanwhile (if we + * slept to get a free comp cache entry above), so dirty it */ + if (page->mapping->host) + mark_inode_dirty_pages(page->mapping->host); + + copy_page: if (compressed(fragment)) { if (current_compressed_page != page) *************** *** 275,278 **** --- 303,307 ---- (*comp_page)->free_space = PAGE_SIZE; + (*comp_page)->total_free_space = PAGE_SIZE; (*comp_page)->free_offset = 0; (*comp_page)->page = page; Index: proc.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/proc.c,v retrieving revision 1.17 retrieving revision 1.18 diff -C2 -r1.17 -r1.18 *** proc.c 5 Jul 2002 15:21:55 -0000 1.17 --- proc.c 11 Jul 2002 19:08:11 -0000 1.18 *************** *** 2,6 **** * linux/mm/comp_cache/proc.c * ! * Time-stamp: <2002-07-05 10:01:47 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/proc.c * ! * Time-stamp: <2002-07-11 16:00:00 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 396,403 **** } ! #define FRAGMENTS_PRINTK \ num_fragments[0], num_fragments[1], num_fragments[2], num_fragments[3], \ num_fragments[4], num_fragments[5], num_fragments[6] ! #define FRAGMENTS_COUNT 7 int --- 396,403 ---- } ! #define HIST_PRINTK \ num_fragments[0], num_fragments[1], num_fragments[2], num_fragments[3], \ num_fragments[4], num_fragments[5], num_fragments[6] ! #define HIST_COUNT 7 int *************** *** 407,411 **** int length = 0, i; ! num_fragments = (unsigned long *) vmalloc(FRAGMENTS_COUNT * sizeof(unsigned long)); if (!num_fragments) { --- 407,411 ---- int length = 0, i; ! num_fragments = (unsigned long *) vmalloc(HIST_COUNT * sizeof(unsigned long)); if (!num_fragments) { *************** *** 414,420 **** } ! length = sprintf(page, "compressed cache - free space histogram\n"); ! memset((void *) num_fragments, 0, FRAGMENTS_COUNT * sizeof(unsigned long)); total1 = free_space_count(0, num_fragments); --- 414,420 ---- } ! length = sprintf(page, "compressed cache - free space histogram (free space x number of fragments)\n"); ! 
memset((void *) num_fragments, 0, HIST_COUNT * sizeof(unsigned long)); total1 = free_space_count(0, num_fragments); *************** *** 424,431 **** 0, total1, ! FRAGMENTS_PRINTK); for (i = 1; i < free_space_hash_size - 1; i += 2) { ! memset((void *) num_fragments, 0, FRAGMENTS_COUNT * sizeof(unsigned long)); total1 = free_space_count(i, num_fragments); total2 = free_space_count(i + 1, num_fragments); --- 424,431 ---- 0, total1, ! HIST_PRINTK); for (i = 1; i < free_space_hash_size - 1; i += 2) { ! memset((void *) num_fragments, 0, HIST_COUNT * sizeof(unsigned long)); total1 = free_space_count(i, num_fragments); total2 = free_space_count(i + 1, num_fragments); *************** *** 434,441 **** "%4d - %4d: %7lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n", (i+1)*100-200?:1, (i+1)*100, total1 + total2, ! FRAGMENTS_PRINTK); } ! memset((void *) num_fragments, 0, FRAGMENTS_COUNT * sizeof(unsigned long)); total1 = free_space_count(free_space_hash_size - 1, num_fragments); --- 434,441 ---- "%4d - %4d: %7lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n", (i+1)*100-200?:1, (i+1)*100, total1 + total2, ! HIST_PRINTK); } ! memset((void *) num_fragments, 0, HIST_COUNT * sizeof(unsigned long)); total1 = free_space_count(free_space_hash_size - 1, num_fragments); *************** *** 444,450 **** (free_space_hash_size - 2) * 100 + 1, (int) PAGE_SIZE, total1, ! FRAGMENTS_PRINTK); vfree(num_fragments); out: return proc_calc_metrics(page, start, off, count, eof, length); --- 444,498 ---- (free_space_hash_size - 2) * 100 + 1, (int) PAGE_SIZE, total1, ! HIST_PRINTK); vfree(num_fragments); + out: + return proc_calc_metrics(page, start, off, count, eof, length); + } + + #define FRAG_INTERVAL 500 + #define FRAG_PRINTK \ + frag_space[0], frag_space[1], frag_space[2], frag_space[3], \ + frag_space[4], frag_space[5], frag_space[6], frag_space[7], \ + frag_space[8] + + int + comp_cache_frag_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) + { + unsigned long * frag_space, total1, total2; + int length = 0, i; + + frag_space = (unsigned long *) vmalloc((PAGE_SIZE/FRAG_INTERVAL + 1) * sizeof(unsigned long)); + + if (!frag_space) { + printk("couldn't allocate data structures for fragmentation histogram\n"); + goto out; + } + + length = sprintf(page, + "compressed cache - fragmentation histogram (free space x fragmented space)\n" + " total <500 -1000 -1500 -2000 -2500 -3000 -3500 -4000 -4096\n"); + + for (i = 1; i < free_space_hash_size - 1; i += 2) { + memset((void *) frag_space, 0, (PAGE_SIZE/FRAG_INTERVAL + 1) * sizeof(unsigned long)); + total1 = fragmentation_count(i, frag_space); + total2 = fragmentation_count(i + 1, frag_space); + + length += sprintf(page + length, + "%4d - %4d: %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n", + (i+1)*100-200?:1, (i+1)*100, total1 + total2, + FRAG_PRINTK); + } + + memset((void *) frag_space, 0, (PAGE_SIZE/FRAG_INTERVAL + 1)* sizeof(unsigned long)); + + total1 = free_space_count(free_space_hash_size - 1, frag_space); + length += sprintf(page + length, + "%4d - %4d: %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n", + (free_space_hash_size - 2) * 100 + 1, (int) PAGE_SIZE, + total1, + FRAG_PRINTK); + + vfree(frag_space); out: return proc_calc_metrics(page, start, off, count, eof, length); Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.57 retrieving revision 1.58 diff -C2 -r1.57 -r1.58 *** swapout.c 9 Jul 2002 13:15:24 -0000 1.57 --- 
swapout.c 11 Jul 2002 19:08:11 -0000 1.58 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-09 10:03:02 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-11 15:33:33 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 379,384 **** * @compressed_size: size of swap cache page in compressed state * - * @dirty: are we going to compress a dirty page? - * * @alloc: do we allocate in case the comp_page->page == NULL? Usually * yes, but in case we are going to store a page from page cache with --- 379,382 ---- *************** *** 390,396 **** * @gfp_mask: we need to know if we can perform IO */ struct comp_cache_page * ! get_comp_cache_page(struct page * page, unsigned short compressed_size, struct comp_cache_fragment ** fragment_out, int dirty, int alloc, unsigned int gfp_mask, int priority) { ! struct comp_cache_page * comp_page = NULL; struct comp_cache_fragment * fragment = NULL, * previous_fragment = NULL; struct list_head * fragment_lh; --- 388,394 ---- * @gfp_mask: we need to know if we can perform IO */ struct comp_cache_page * ! get_comp_cache_page(struct page * page, unsigned short compressed_size, struct comp_cache_fragment ** fragment_out, int alloc, unsigned int gfp_mask, int priority) { ! struct comp_cache_page * comp_page = NULL, ** hash_table; struct comp_cache_fragment * fragment = NULL, * previous_fragment = NULL; struct list_head * fragment_lh; *************** *** 407,411 **** page_cache_get(page); ! maxtry = 3; while (maxtry--) { --- 405,410 ---- page_cache_get(page); ! maxtry = 5; ! hash_table = free_space_hash; while (maxtry--) { *************** *** 414,418 **** while (maxscan--) { ! comp_page = search_comp_page_free_space(aux_comp_size); /* no comp_page, that comp_page->free_space > compressed_size */ --- 413,417 ---- while (maxscan--) { ! comp_page = search_comp_page(hash_table, aux_comp_size); /* no comp_page, that comp_page->free_space > compressed_size */ *************** *** 433,442 **** while (comp_page && TryLockPage(comp_page->page)) { ! if (aux_comp_size < comp_page->free_space) ! aux_comp_size = comp_page->free_space + 1; - do { - comp_page = comp_page->next_hash; - } while (comp_page && comp_page->free_space < compressed_size); } --- 432,452 ---- while (comp_page && TryLockPage(comp_page->page)) { ! if (hash_table == free_space_hash) { ! if (aux_comp_size < comp_page->free_space) ! aux_comp_size = comp_page->free_space + 1; ! ! do { ! comp_page = comp_page->next_hash_fs; ! } while (comp_page && comp_page->free_space < compressed_size); ! } ! else { ! if (aux_comp_size < comp_page->total_free_space) ! aux_comp_size = comp_page->total_free_space + 1; ! ! do { ! comp_page = comp_page->next_hash_tfs; ! } while (comp_page && comp_page->total_free_space < compressed_size); ! } } *************** *** 446,450 **** /* remove from free space hash table before update */ remove_comp_page_from_hash_table(comp_page); ! if (comp_page->free_space < compressed_size) BUG(); --- 456,466 ---- /* remove from free space hash table before update */ remove_comp_page_from_hash_table(comp_page); ! ! /* found a page whose total free space (ie, ! * free space + fragment space) is enough? so ! * we need to compact it */ ! if (hash_table == total_free_space_hash) ! compact_fragments(comp_page); ! 
if (comp_page->free_space < compressed_size) BUG(); *************** *** 453,456 **** --- 469,479 ---- } + if (hash_table == free_space_hash) { + hash_table = total_free_space_hash; + continue; + } + + hash_table = free_space_hash; + /*** * We couldn't find a comp page with enough free *************** *** 464,468 **** if (!writeout_fragments(gfp_mask, priority)) ! goto give_up; if (unlikely(current->need_resched)) { --- 487,491 ---- if (!writeout_fragments(gfp_mask, priority)) ! goto out_release; if (unlikely(current->need_resched)) { *************** *** 472,476 **** if (TryLockPage(page)) ! goto give_up; if (page_count(page) - !!page->buffers == 3) --- 495,499 ---- if (TryLockPage(page)) ! goto out_release; if (page_count(page) - !!page->buffers == 3) *************** *** 481,489 **** UnlockPage(page); - give_up: - if (!dirty) - goto out_release; - __set_page_dirty(page); - ClearPageLaunder(page); out_release: page_cache_release(page); --- 504,507 ---- *************** *** 541,544 **** --- 559,563 ---- /* let's update some important fields */ comp_page->free_space -= compressed_size; + comp_page->total_free_space -= compressed_size; comp_page->free_offset += compressed_size; *************** *** 583,600 **** out: - if (dirty) { - CompFragmentSetDirty(fragment); - list_add(&fragment->mapping_list, &fragment->mapping->dirty_comp_pages); - - /* mark the inode as having dirty pages since the - * inode might have been synced in the meanwhile (if - * we slept to get a free comp cache entry above) */ - if (page->mapping->host) - mark_inode_dirty_pages(page->mapping->host); - } - else - list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages); - page->mapping->nrpages++; - /* we need to account the number of fragments that are from * swapper_space to correctly count the cached, swapcached --- 602,605 ---- *************** *** 625,629 **** comp_page = NULL; UnlockPage(page); ! goto give_up; } --- 630,634 ---- comp_page = NULL; UnlockPage(page); ! goto out_release; } |
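
To illustrate the dual hash-table scheme described in the log message, here is a
minimal userspace sketch. It is not the kernel code: struct comp_page, hashfn(),
add_comp_page() and remove_comp_page() are simplified, hypothetical stand-ins for
the kernel's comp_cache_page, free_space_hashfn(), add_comp_page_to_hash_table()
and remove_comp_page_from_hash_table(). It only shows the bucketing by free space
and the head insertion/removal with next/pprev links, with no locking or page
handling.

#include <stdio.h>

#define PAGE_SIZE 4096
#define INTERVAL  100                      /* bucket width, like free_space_interval */
#define HASH_SIZE (PAGE_SIZE / INTERVAL + 2)

struct comp_page {
	unsigned int free_space;           /* space usable right away               */
	unsigned int total_free_space;     /* free_space + space of freed fragments */

	/* chaining in the "free space" table */
	struct comp_page *next_hash_fs, **pprev_hash_fs;
	/* chaining in the "total free space" table */
	struct comp_page *next_hash_tfs, **pprev_hash_tfs;
};

static struct comp_page *free_space_hash[HASH_SIZE];
static struct comp_page *total_free_space_hash[HASH_SIZE];

static unsigned int hashfn(unsigned int space)
{
	return space / INTERVAL;
}

/* Head insertion into both buckets; the pointer-to-pointer back link
 * lets removal run without searching the bucket the page sits in. */
static void add_comp_page(struct comp_page *p)
{
	struct comp_page **head;

	head = &free_space_hash[hashfn(p->free_space)];
	if ((p->next_hash_fs = *head))
		(*head)->pprev_hash_fs = &p->next_hash_fs;
	*head = p;
	p->pprev_hash_fs = head;

	head = &total_free_space_hash[hashfn(p->total_free_space)];
	if ((p->next_hash_tfs = *head))
		(*head)->pprev_hash_tfs = &p->next_hash_tfs;
	*head = p;
	p->pprev_hash_tfs = head;
}

static void remove_comp_page(struct comp_page *p)
{
	if (p->next_hash_fs)
		p->next_hash_fs->pprev_hash_fs = p->pprev_hash_fs;
	*p->pprev_hash_fs = p->next_hash_fs;
	p->pprev_hash_fs = NULL;

	if (p->next_hash_tfs)
		p->next_hash_tfs->pprev_hash_tfs = p->pprev_hash_tfs;
	*p->pprev_hash_tfs = p->next_hash_tfs;
	p->pprev_hash_tfs = NULL;
}

int main(void)
{
	struct comp_page a = { .free_space = 300, .total_free_space = 900 };
	struct comp_page b = { .free_space = 50,  .total_free_space = 50  };

	add_comp_page(&a);
	add_comp_page(&b);

	/* An 800-byte fragment fits in no page's immediate free space, but
	 * page 'a' has enough *total* free space: it is exactly the kind of
	 * compaction candidate the second table exists to find. */
	printf("free_space bucket %u head: %u bytes free now\n",
	       hashfn(300), free_space_hash[hashfn(300)]->free_space);
	printf("total_free_space bucket %u head: %u bytes free in total\n",
	       hashfn(900), total_free_space_hash[hashfn(900)]->total_free_space);

	remove_comp_page(&a);
	remove_comp_page(&b);
	return 0;
}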
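
The compaction step itself (compact_fragments() in free.c above) slides the
still-live fragments to the front of the page so that the space left by freed
fragments becomes one contiguous region at the end. Below is a short,
self-contained userspace sketch of that idea only; struct frag, compact() and
the sample sizes are illustrative, and sorting by offset stands in for the
repeated minimum-offset search the actual patch performs on the fragment list.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct frag {
	int offset;   /* current offset inside the page       */
	int size;     /* compressed size of the fragment      */
	int freed;    /* non-zero if the fragment was freed   */
};

static int cmp_offset(const void *a, const void *b)
{
	return ((const struct frag *)a)->offset - ((const struct frag *)b)->offset;
}

/* Compact 'page' in place and return the new free offset, i.e. where
 * the now-contiguous free region starts. */
static int compact(unsigned char *page, struct frag *frags, int n)
{
	int next_offset = 0;

	/* process live fragments in increasing offset order */
	qsort(frags, n, sizeof(*frags), cmp_offset);

	for (int i = 0; i < n; i++) {
		if (frags[i].freed)
			continue;       /* its space is simply reclaimed */
		memmove(page + next_offset, page + frags[i].offset, frags[i].size);
		frags[i].offset = next_offset;
		next_offset += frags[i].size;
	}
	return next_offset;
}

int main(void)
{
	unsigned char page[PAGE_SIZE];
	struct frag frags[] = {
		{ .offset = 0,    .size = 700,  .freed = 0 },
		{ .offset = 700,  .size = 900,  .freed = 1 },   /* hole */
		{ .offset = 1600, .size = 1200, .freed = 0 },
	};

	memset(page, 0, sizeof(page));
	int free_offset = compact(page, frags, (int)(sizeof(frags) / sizeof(frags[0])));

	/* Before compaction only 4096 - 2800 = 1296 bytes were usable right
	 * away; afterwards the freed 900 bytes join them, giving 2196 bytes
	 * of contiguous free space. */
	printf("contiguous free space now starts at %d (%d bytes)\n",
	       free_offset, (int)PAGE_SIZE - free_offset);
	return 0;
}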