linuxcompressed-checkins Mailing List for Linux Compressed Cache (Page 6)
From: Rodrigo S. de C. <rc...@us...> - 2002-07-15 20:52:26
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv1029/mm Modified Files: filemap.c page_alloc.c swap_state.c Log Message: Feature o Added feature to enable 8K pages (on i386). This option can only be selected if "Resize Compressed Cache On Demand" is enabled since it does not support pages with buffers. The motive to implement this idea is to make better use of the space reserved for compressed cache, since depending on the compression ratio, several fragments end up stored alone in a page. Index: filemap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v retrieving revision 1.32 retrieving revision 1.33 diff -C2 -r1.32 -r1.33 *** filemap.c 5 Jul 2002 15:21:49 -0000 1.32 --- filemap.c 15 Jul 2002 20:52:23 -0000 1.33 *************** *** 1099,1104 **** lru_cache_add(page); #ifdef CONFIG_COMP_PAGE_CACHE ! if (!read_comp_cache(mapping, index, page) && TryLockPage(page)) ! BUG(); #endif } --- 1099,1106 ---- lru_cache_add(page); #ifdef CONFIG_COMP_PAGE_CACHE ! if (!read_comp_cache(mapping, index, page) && TryLockPage(page)) { ! ClearPageUptodate(page); ! BUG(); ! } #endif } Index: page_alloc.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/page_alloc.c,v retrieving revision 1.21 retrieving revision 1.22 diff -C2 -r1.21 -r1.22 *** page_alloc.c 18 Jun 2002 18:04:31 -0000 1.21 --- page_alloc.c 15 Jul 2002 20:52:23 -0000 1.22 *************** *** 642,655 **** int j = ZONE_NORMAL; zone_t *zone = contig_page_data.node_zones + j; ! zone_num_comp_pages = num_comp_pages; ! if (num_comp_pages > zone->size) ! num_comp_pages = zone->size; /* whoops: that should be zone->size minus zholes. Since * zholes is always 0 when calling free_area_init_core(), I * guess we don't have to worry about that now */ ! mask = ((zone->size - num_comp_pages)/zone_balance_ratio[j]); if (mask < zone_balance_min[j]) --- 642,659 ---- int j = ZONE_NORMAL; zone_t *zone = contig_page_data.node_zones + j; + int real_num_comp_pages; ! /* the real number of memory pages used by compressed cache */ ! real_num_comp_pages = comp_page_to_page(num_comp_pages); ! ! zone_num_comp_pages = real_num_comp_pages; ! if (real_num_comp_pages > zone->size) ! real_num_comp_pages = zone->size; /* whoops: that should be zone->size minus zholes. Since * zholes is always 0 when calling free_area_init_core(), I * guess we don't have to worry about that now */ ! mask = ((zone->size - real_num_comp_pages)/zone_balance_ratio[j]); if (mask < zone_balance_min[j]) Index: swap_state.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v retrieving revision 1.35 retrieving revision 1.36 diff -C2 -r1.35 -r1.36 *** swap_state.c 5 Jul 2002 15:21:50 -0000 1.35 --- swap_state.c 15 Jul 2002 20:52:23 -0000 1.36 *************** *** 216,219 **** --- 216,220 ---- set_vswap_allocating(entry); new_page = alloc_page(GFP_HIGHUSER); + clear_vswap_allocating(entry); if (!new_page) break; /* Out of memory */ *************** *** 252,256 **** if (new_page) page_cache_release(new_page); - clear_vswap_allocating(entry); return found_page; } --- 253,256 ---- |
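The page_alloc.c hunk relies on comp_page_to_page() to turn a count of compressed-cache pages into real memory pages, since with the new option one compressed-cache page can span two machine pages. The macro itself is not part of this diff; a minimal sketch under that assumption (the COMP_PAGE_ORDER name is hypothetical):

    /* Sketch only -- comp_page_to_page() is not shown in this checkin.
     * Assume a compressed-cache page is built from 2^COMP_PAGE_ORDER
     * machine pages: order 1 when the 8K option is enabled on i386
     * (4 KiB base pages), order 0 otherwise. */
    #define COMP_PAGE_ORDER 1                        /* hypothetical */
    #define comp_page_to_page(n)    ((n) << COMP_PAGE_ORDER)

With that, free_area_init_core() keeps the zone watermark accounting in real pages, which is what the new real_num_comp_pages variable holds.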
From: Rodrigo S. de C. <rc...@us...> - 2002-07-15 11:24:32
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv12760/mm/comp_cache Modified Files: main.c Log Message: Bug fix o Compilation error when Page Cache support is enabled and Resize Compressed Cache on demand is disabled. Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.52 retrieving revision 1.53 diff -C2 -r1.52 -r1.53 *** main.c 11 Jul 2002 19:08:11 -0000 1.52 --- main.c 15 Jul 2002 11:24:27 -0000 1.53 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-07-11 09:32:13 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-07-15 08:20:26 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 216,220 **** struct comp_cache_fragment * fragment; struct comp_cache_page * comp_page; ! unsigned short comp_size, dirty; struct page * old_page; int ret = 0; --- 216,220 ---- struct comp_cache_fragment * fragment; struct comp_cache_page * comp_page; ! unsigned short comp_size; struct page * old_page; int ret = 0; *************** *** 247,253 **** * page with a page reserved for compressed cache use */ comp_size = PAGE_SIZE; ! dirty = 0; ! ! comp_page = get_comp_cache_page(*page, comp_size, &fragment, dirty, 0, gfp_mask, priority); if (!comp_page) --- 247,251 ---- * page with a page reserved for compressed cache use */ comp_size = PAGE_SIZE; ! comp_page = get_comp_cache_page(*page, comp_size, &fragment, 0, gfp_mask, priority); if (!comp_page) |
From: Rodrigo S. de C. <rc...@us...> - 2002-07-11 19:08:15
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv16722/mm/comp_cache Modified Files: adaptivity.c aux.c free.c main.c proc.c swapout.c Log Message: Feature o New proc entry (comp_cache_frag), showing the fragmentation in the compressed cache. o Every struct comp_cache_struct is added to two hash tables: free space and total free space. The former is the old one, showing the amount of free space that can be used right away. The latter shows the total free space, ie it also accounts the fragmented space. Thus, if there is a page with total space enough for a new fragment, we compact this page and return it to be used by the new fragment. The two tables are set up the same way. o Added back feature removed in 0.23pre9 due to a bug fix. That feature would allow pages to be compressed even when the gfp_mask does not allow. Now it is back and it won't write out a page if the gfp_mask does not allow. This feature allows a better use of the compressed cache space. Cleanup o Removed dirty parameter from get_comp_cache_page(). o Better descriptions of /proc/comp_cache_{hist,frag}. Index: adaptivity.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v retrieving revision 1.33 retrieving revision 1.34 diff -C2 -r1.33 -r1.34 *** adaptivity.c 5 Jul 2002 15:21:54 -0000 1.33 --- adaptivity.c 11 Jul 2002 19:08:11 -0000 1.34 *************** *** 2,6 **** * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-07-04 13:59:28 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-07-09 16:33:37 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 554,558 **** check_empty_pages: /* let's look for empty compressed cache entries */ ! empty_comp_page = search_comp_page_free_space(PAGE_SIZE); if (!empty_comp_page || !empty_comp_page->page) --- 554,558 ---- check_empty_pages: /* let's look for empty compressed cache entries */ ! empty_comp_page = search_comp_page(free_space_hash, PAGE_SIZE); if (!empty_comp_page || !empty_comp_page->page) Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.35 retrieving revision 1.36 diff -C2 -r1.35 -r1.36 *** aux.c 1 Jul 2002 21:36:50 -0000 1.35 --- aux.c 11 Jul 2002 19:08:11 -0000 1.36 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-07-01 18:04:55 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-07-11 15:45:38 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 13,17 **** #include <linux/pagemap.h> #include <linux/init.h> ! #include <linux/vmalloc.h> struct comp_cache_fragment ** fragment_hash; --- 13,17 ---- #include <linux/pagemap.h> #include <linux/init.h> ! #include <linux/slab.h> struct comp_cache_fragment ** fragment_hash; *************** *** 21,28 **** unsigned int fragment_hash_bits; ! static struct comp_cache_page ** free_space_hash; unsigned int free_space_hash_size; unsigned int free_space_interval; /* computes (unsigned long long x) / (unsigned long long y) */ unsigned long long --- 21,32 ---- unsigned int fragment_hash_bits; ! 
struct comp_cache_page ** free_space_hash; unsigned int free_space_hash_size; unsigned int free_space_interval; + struct comp_cache_page ** total_free_space_hash; + unsigned int total_free_space_hash_size; + unsigned int total_free_space_interval; + /* computes (unsigned long long x) / (unsigned long long y) */ unsigned long long *************** *** 198,202 **** BUG(); ! for (comp_page = free_space_hash[index], total = 0; comp_page; comp_page = comp_page->next_hash, total++) { total_fragments = 0; --- 202,206 ---- BUG(); ! for (comp_page = free_space_hash[index], total = 0; comp_page; comp_page = comp_page->next_hash_fs, total++) { total_fragments = 0; *************** *** 251,280 **** } inline void add_comp_page_to_hash_table(struct comp_cache_page * new_comp_page) { struct comp_cache_page ** comp_page; comp_page = &free_space_hash[free_space_hashfn(new_comp_page->free_space)]; ! if ((new_comp_page->next_hash = *comp_page)) ! (*comp_page)->pprev_hash = &new_comp_page->next_hash; *comp_page = new_comp_page; ! new_comp_page->pprev_hash = comp_page; } inline void remove_comp_page_from_hash_table(struct comp_cache_page * comp_page) { ! struct comp_cache_page *next = comp_page->next_hash; ! struct comp_cache_page **pprev = comp_page->pprev_hash; if (next) ! next->pprev_hash = pprev; *pprev = next; ! comp_page->pprev_hash = NULL; } struct comp_cache_page * ! search_comp_page_free_space(int free_space) { struct comp_cache_page * comp_page; int idx, i; --- 255,339 ---- } + unsigned long + fragmentation_count(int index, unsigned long * frag_space) { + struct comp_cache_page * comp_page; + struct comp_cache_fragment * fragment; + struct list_head * fragment_lh; + unsigned long total, fragmented_space; + + if (index < 0) + BUG(); + + if (index >= free_space_hash_size) + BUG(); + + for (comp_page = free_space_hash[index], total = 0; comp_page; comp_page = comp_page->next_hash_fs, total++) { + fragmented_space = 0; + + for_each_fragment(fragment_lh, comp_page) { + fragment = list_entry(fragment_lh, struct comp_cache_fragment, list); + + if (CompFragmentFreed(fragment)) + fragmented_space += fragment->compressed_size; + } + + if (fragmented_space + comp_page->free_space != comp_page->total_free_space) + BUG(); + + frag_space[(int) fragmented_space/500]++; + } + + return total; + } + inline void add_comp_page_to_hash_table(struct comp_cache_page * new_comp_page) { struct comp_cache_page ** comp_page; + /* add to free space hash table */ comp_page = &free_space_hash[free_space_hashfn(new_comp_page->free_space)]; ! if ((new_comp_page->next_hash_fs = *comp_page)) ! (*comp_page)->pprev_hash_fs = &new_comp_page->next_hash_fs; ! ! *comp_page = new_comp_page; ! new_comp_page->pprev_hash_fs = comp_page; ! ! /* add to total free space hash table */ ! comp_page = &total_free_space_hash[free_space_hashfn(new_comp_page->total_free_space)]; ! ! if ((new_comp_page->next_hash_tfs = *comp_page)) ! (*comp_page)->pprev_hash_tfs = &new_comp_page->next_hash_tfs; *comp_page = new_comp_page; ! new_comp_page->pprev_hash_tfs = comp_page; } inline void remove_comp_page_from_hash_table(struct comp_cache_page * comp_page) { ! struct comp_cache_page *next; ! struct comp_cache_page **pprev; + /* remove from free space hash table */ + next = comp_page->next_hash_fs; + pprev = comp_page->pprev_hash_fs; + if (next) ! next->pprev_hash_fs = pprev; *pprev = next; ! comp_page->pprev_hash_fs = NULL; ! ! /* remove from total free space hash table */ ! next = comp_page->next_hash_tfs; ! pprev = comp_page->pprev_hash_tfs; ! ! if (next) ! 
next->pprev_hash_tfs = pprev; ! *pprev = next; ! comp_page->pprev_hash_tfs = NULL; } struct comp_cache_page * ! search_comp_page(struct comp_cache_page ** hash_table, int free_space) { struct comp_cache_page * comp_page; int idx, i; *************** *** 289,293 **** i = idx + 1; do { ! comp_page = free_space_hash[i++]; } while(i < free_space_hash_size && !comp_page); --- 348,352 ---- i = idx + 1; do { ! comp_page = hash_table[i++]; } while(i < free_space_hash_size && !comp_page); *************** *** 300,307 **** check_exact_size: ! comp_page = free_space_hash[idx]; ! while (comp_page && comp_page->free_space < free_space) ! comp_page = comp_page->next_hash; return comp_page; --- 359,372 ---- check_exact_size: ! comp_page = hash_table[idx]; ! if (hash_table == free_space_hash) { ! while (comp_page && comp_page->free_space < free_space) ! comp_page = comp_page->next_hash_fs; ! } ! else { ! while (comp_page && comp_page->total_free_space < free_space) ! comp_page = comp_page->next_hash_tfs; ! } return comp_page; *************** *** 526,530 **** free_space_hash_size = (int) (PAGE_SIZE/free_space_interval) + 2; ! free_space_hash = vmalloc(free_space_hash_size * sizeof(struct comp_cache_page *)); printk("Compressed Cache: free space (%u entries = %uB)\n", free_space_hash_size, free_space_hash_size * sizeof(struct comp_cache_page *)); --- 591,595 ---- free_space_hash_size = (int) (PAGE_SIZE/free_space_interval) + 2; ! free_space_hash = (struct comp_cache_page **) kmalloc(free_space_hash_size * sizeof(struct comp_cache_page *), GFP_ATOMIC); printk("Compressed Cache: free space (%u entries = %uB)\n", free_space_hash_size, free_space_hash_size * sizeof(struct comp_cache_page *)); *************** *** 534,537 **** --- 599,615 ---- memset((void *) free_space_hash, 0, free_space_hash_size * sizeof(struct comp_cache_page *)); + + /* inits comp cache total free space hash table */ + total_free_space_interval = 100 * ((float) PAGE_SIZE)/4096; + total_free_space_hash_size = (int) (PAGE_SIZE/free_space_interval) + 2; + + total_free_space_hash = (struct comp_cache_page **) kmalloc(total_free_space_hash_size * sizeof(struct comp_cache_page *), GFP_ATOMIC); + + printk("Compressed Cache: total free space (%u entries = %uB)\n", total_free_space_hash_size, total_free_space_hash_size * sizeof(struct comp_cache_page *)); + + if (!total_free_space_hash) + panic("comp_cache_hash_init(): couldn't allocate total free space hash table\n"); + + memset((void *) total_free_space_hash, 0, total_free_space_hash_size * sizeof(struct comp_cache_page *)); } Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.33 retrieving revision 1.34 diff -C2 -r1.33 -r1.34 *** free.c 25 Jun 2002 14:34:07 -0000 1.33 --- free.c 11 Jul 2002 19:08:11 -0000 1.34 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-06-24 18:13:13 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! 
* Time-stamp: <2002-07-09 16:34:26 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 75,78 **** --- 75,142 ---- num_fragments--; comp_cache_free_space += fragment->compressed_size; + + /*** + * Add the fragment compressed size only to total_free_space + * field since fragments that will be standing to be merged + * cannot be added to free_space field at this moment + */ + fragment->comp_page->total_free_space += fragment->compressed_size; + } + + void + compact_fragments(struct comp_cache_page * comp_page) + { + struct comp_cache_fragment * fragment, * min_fragment = NULL; + struct list_head * fragment_lh, * tmp_lh, aux_fragment_list; + int min_offset = PAGE_SIZE + 1, num_fragments = 0, next_offset = 0; + + INIT_LIST_HEAD(&aux_fragment_list); + + /* remove all the freed fragments */ + for_each_fragment(fragment_lh, comp_page) { + fragment = list_entry(fragment_lh, struct comp_cache_fragment, list); + + if (CompFragmentFreed(fragment)) { + list_del(&(fragment->list)); + comp_page->free_space += fragment->compressed_size; + if (!CompFragmentTestandClearIO(fragment)) + kmem_cache_free(fragment_cachep, (fragment)); + continue; + } + + /* fragment not yet freed */ + if (fragment->offset < min_offset) { + min_offset = fragment->offset; + min_fragment = fragment; + } + num_fragments++; + } + + /* compact the other fragments */ + while (num_fragments--) { + list_del(&min_fragment->list); + list_add(&min_fragment->list, &aux_fragment_list); + + memmove(page_address(comp_page->page) + next_offset, page_address(comp_page->page) + min_fragment->offset, min_fragment->compressed_size); + min_fragment->offset = next_offset; + + min_offset = PAGE_SIZE + 1; + next_offset += min_fragment->compressed_size; + + for_each_fragment(fragment_lh, comp_page) { + fragment = list_entry(fragment_lh, struct comp_cache_fragment, list); + if (fragment->offset < min_offset) { + min_offset = fragment->offset; + min_fragment = fragment; + } + } + } + + comp_page->free_offset = next_offset; + + list_for_each_safe(fragment_lh, tmp_lh, &aux_fragment_list) { + list_del(fragment_lh); + list_add(fragment_lh, &(comp_page->fragments)); + } } Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.51 retrieving revision 1.52 diff -C2 -r1.51 -r1.52 *** main.c 1 Jul 2002 21:36:50 -0000 1.51 --- main.c 11 Jul 2002 19:08:11 -0000 1.52 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-07-01 15:45:58 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-07-11 09:32:13 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 41,45 **** extern unsigned long num_physpages; ! extern struct comp_cache_page * get_comp_cache_page(struct page *, unsigned short, struct comp_cache_fragment **, int, int, unsigned int, int); inline void --- 41,45 ---- extern unsigned long num_physpages; ! extern struct comp_cache_page * get_comp_cache_page(struct page *, unsigned short, struct comp_cache_fragment **, int, unsigned int, int); inline void *************** *** 55,58 **** --- 55,64 ---- #endif if (write) { + /* if gfp_mask does not allow us to write out the + * page, unlock the page and set all the bits back */ + if (!(gfp_mask & __GFP_FS)) { + UnlockPage(page); + goto set_bits_back; + } writepage(page); return; *************** *** 65,69 **** BUG(); ! 
compress_page(page, 1, gfp_mask, priority); } --- 71,81 ---- BUG(); ! /* in the case we fail to compress the page, set the bits back ! * since that's a dirty page */ ! if (compress_page(page, 1, gfp_mask, priority)) ! return; ! set_bits_back: ! SetPageDirty(page); ! ClearPageLaunder(page); } *************** *** 109,113 **** comp_size = compress(current_compressed_page = page, buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm, dirty); ! comp_page = get_comp_cache_page(page, comp_size, &fragment, dirty, 1, gfp_mask, priority); /* if comp_page == NULL, get_comp_cache_page() gave up --- 121,125 ---- comp_size = compress(current_compressed_page = page, buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm, dirty); ! comp_page = get_comp_cache_page(page, comp_size, &fragment, 1, gfp_mask, priority); /* if comp_page == NULL, get_comp_cache_page() gave up *************** *** 124,127 **** --- 136,155 ---- set_fragment_algorithm(fragment, algorithm); + /* fix mapping stuff */ + page->mapping->nrpages++; + if (!dirty) { + list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages); + goto copy_page; + } + + CompFragmentSetDirty(fragment); + list_add(&fragment->mapping_list, &fragment->mapping->dirty_comp_pages); + + /* the inode might have been synced in the meanwhile (if we + * slept to get a free comp cache entry above), so dirty it */ + if (page->mapping->host) + mark_inode_dirty_pages(page->mapping->host); + + copy_page: if (compressed(fragment)) { if (current_compressed_page != page) *************** *** 275,278 **** --- 303,307 ---- (*comp_page)->free_space = PAGE_SIZE; + (*comp_page)->total_free_space = PAGE_SIZE; (*comp_page)->free_offset = 0; (*comp_page)->page = page; Index: proc.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/proc.c,v retrieving revision 1.17 retrieving revision 1.18 diff -C2 -r1.17 -r1.18 *** proc.c 5 Jul 2002 15:21:55 -0000 1.17 --- proc.c 11 Jul 2002 19:08:11 -0000 1.18 *************** *** 2,6 **** * linux/mm/comp_cache/proc.c * ! * Time-stamp: <2002-07-05 10:01:47 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/proc.c * ! * Time-stamp: <2002-07-11 16:00:00 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 396,403 **** } ! #define FRAGMENTS_PRINTK \ num_fragments[0], num_fragments[1], num_fragments[2], num_fragments[3], \ num_fragments[4], num_fragments[5], num_fragments[6] ! #define FRAGMENTS_COUNT 7 int --- 396,403 ---- } ! #define HIST_PRINTK \ num_fragments[0], num_fragments[1], num_fragments[2], num_fragments[3], \ num_fragments[4], num_fragments[5], num_fragments[6] ! #define HIST_COUNT 7 int *************** *** 407,411 **** int length = 0, i; ! num_fragments = (unsigned long *) vmalloc(FRAGMENTS_COUNT * sizeof(unsigned long)); if (!num_fragments) { --- 407,411 ---- int length = 0, i; ! num_fragments = (unsigned long *) vmalloc(HIST_COUNT * sizeof(unsigned long)); if (!num_fragments) { *************** *** 414,420 **** } ! length = sprintf(page, "compressed cache - free space histogram\n"); ! memset((void *) num_fragments, 0, FRAGMENTS_COUNT * sizeof(unsigned long)); total1 = free_space_count(0, num_fragments); --- 414,420 ---- } ! length = sprintf(page, "compressed cache - free space histogram (free space x number of fragments)\n"); ! 
memset((void *) num_fragments, 0, HIST_COUNT * sizeof(unsigned long)); total1 = free_space_count(0, num_fragments); *************** *** 424,431 **** 0, total1, ! FRAGMENTS_PRINTK); for (i = 1; i < free_space_hash_size - 1; i += 2) { ! memset((void *) num_fragments, 0, FRAGMENTS_COUNT * sizeof(unsigned long)); total1 = free_space_count(i, num_fragments); total2 = free_space_count(i + 1, num_fragments); --- 424,431 ---- 0, total1, ! HIST_PRINTK); for (i = 1; i < free_space_hash_size - 1; i += 2) { ! memset((void *) num_fragments, 0, HIST_COUNT * sizeof(unsigned long)); total1 = free_space_count(i, num_fragments); total2 = free_space_count(i + 1, num_fragments); *************** *** 434,441 **** "%4d - %4d: %7lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n", (i+1)*100-200?:1, (i+1)*100, total1 + total2, ! FRAGMENTS_PRINTK); } ! memset((void *) num_fragments, 0, FRAGMENTS_COUNT * sizeof(unsigned long)); total1 = free_space_count(free_space_hash_size - 1, num_fragments); --- 434,441 ---- "%4d - %4d: %7lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n", (i+1)*100-200?:1, (i+1)*100, total1 + total2, ! HIST_PRINTK); } ! memset((void *) num_fragments, 0, HIST_COUNT * sizeof(unsigned long)); total1 = free_space_count(free_space_hash_size - 1, num_fragments); *************** *** 444,450 **** (free_space_hash_size - 2) * 100 + 1, (int) PAGE_SIZE, total1, ! FRAGMENTS_PRINTK); vfree(num_fragments); out: return proc_calc_metrics(page, start, off, count, eof, length); --- 444,498 ---- (free_space_hash_size - 2) * 100 + 1, (int) PAGE_SIZE, total1, ! HIST_PRINTK); vfree(num_fragments); + out: + return proc_calc_metrics(page, start, off, count, eof, length); + } + + #define FRAG_INTERVAL 500 + #define FRAG_PRINTK \ + frag_space[0], frag_space[1], frag_space[2], frag_space[3], \ + frag_space[4], frag_space[5], frag_space[6], frag_space[7], \ + frag_space[8] + + int + comp_cache_frag_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) + { + unsigned long * frag_space, total1, total2; + int length = 0, i; + + frag_space = (unsigned long *) vmalloc((PAGE_SIZE/FRAG_INTERVAL + 1) * sizeof(unsigned long)); + + if (!frag_space) { + printk("couldn't allocate data structures for fragmentation histogram\n"); + goto out; + } + + length = sprintf(page, + "compressed cache - fragmentation histogram (free space x fragmented space)\n" + " total <500 -1000 -1500 -2000 -2500 -3000 -3500 -4000 -4096\n"); + + for (i = 1; i < free_space_hash_size - 1; i += 2) { + memset((void *) frag_space, 0, (PAGE_SIZE/FRAG_INTERVAL + 1) * sizeof(unsigned long)); + total1 = fragmentation_count(i, frag_space); + total2 = fragmentation_count(i + 1, frag_space); + + length += sprintf(page + length, + "%4d - %4d: %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n", + (i+1)*100-200?:1, (i+1)*100, total1 + total2, + FRAG_PRINTK); + } + + memset((void *) frag_space, 0, (PAGE_SIZE/FRAG_INTERVAL + 1)* sizeof(unsigned long)); + + total1 = free_space_count(free_space_hash_size - 1, frag_space); + length += sprintf(page + length, + "%4d - %4d: %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu %5lu\n", + (free_space_hash_size - 2) * 100 + 1, (int) PAGE_SIZE, + total1, + FRAG_PRINTK); + + vfree(frag_space); out: return proc_calc_metrics(page, start, off, count, eof, length); Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.57 retrieving revision 1.58 diff -C2 -r1.57 -r1.58 *** swapout.c 9 Jul 2002 13:15:24 -0000 1.57 --- 
swapout.c 11 Jul 2002 19:08:11 -0000 1.58 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-09 10:03:02 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-11 15:33:33 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 379,384 **** * @compressed_size: size of swap cache page in compressed state * - * @dirty: are we going to compress a dirty page? - * * @alloc: do we allocate in case the comp_page->page == NULL? Usually * yes, but in case we are going to store a page from page cache with --- 379,382 ---- *************** *** 390,396 **** * @gfp_mask: we need to know if we can perform IO */ struct comp_cache_page * ! get_comp_cache_page(struct page * page, unsigned short compressed_size, struct comp_cache_fragment ** fragment_out, int dirty, int alloc, unsigned int gfp_mask, int priority) { ! struct comp_cache_page * comp_page = NULL; struct comp_cache_fragment * fragment = NULL, * previous_fragment = NULL; struct list_head * fragment_lh; --- 388,394 ---- * @gfp_mask: we need to know if we can perform IO */ struct comp_cache_page * ! get_comp_cache_page(struct page * page, unsigned short compressed_size, struct comp_cache_fragment ** fragment_out, int alloc, unsigned int gfp_mask, int priority) { ! struct comp_cache_page * comp_page = NULL, ** hash_table; struct comp_cache_fragment * fragment = NULL, * previous_fragment = NULL; struct list_head * fragment_lh; *************** *** 407,411 **** page_cache_get(page); ! maxtry = 3; while (maxtry--) { --- 405,410 ---- page_cache_get(page); ! maxtry = 5; ! hash_table = free_space_hash; while (maxtry--) { *************** *** 414,418 **** while (maxscan--) { ! comp_page = search_comp_page_free_space(aux_comp_size); /* no comp_page, that comp_page->free_space > compressed_size */ --- 413,417 ---- while (maxscan--) { ! comp_page = search_comp_page(hash_table, aux_comp_size); /* no comp_page, that comp_page->free_space > compressed_size */ *************** *** 433,442 **** while (comp_page && TryLockPage(comp_page->page)) { ! if (aux_comp_size < comp_page->free_space) ! aux_comp_size = comp_page->free_space + 1; - do { - comp_page = comp_page->next_hash; - } while (comp_page && comp_page->free_space < compressed_size); } --- 432,452 ---- while (comp_page && TryLockPage(comp_page->page)) { ! if (hash_table == free_space_hash) { ! if (aux_comp_size < comp_page->free_space) ! aux_comp_size = comp_page->free_space + 1; ! ! do { ! comp_page = comp_page->next_hash_fs; ! } while (comp_page && comp_page->free_space < compressed_size); ! } ! else { ! if (aux_comp_size < comp_page->total_free_space) ! aux_comp_size = comp_page->total_free_space + 1; ! ! do { ! comp_page = comp_page->next_hash_tfs; ! } while (comp_page && comp_page->total_free_space < compressed_size); ! } } *************** *** 446,450 **** /* remove from free space hash table before update */ remove_comp_page_from_hash_table(comp_page); ! if (comp_page->free_space < compressed_size) BUG(); --- 456,466 ---- /* remove from free space hash table before update */ remove_comp_page_from_hash_table(comp_page); ! ! /* found a page whose total free space (ie, ! * free space + fragment space) is enough? so ! * we need to compact it */ ! if (hash_table == total_free_space_hash) ! compact_fragments(comp_page); ! 
if (comp_page->free_space < compressed_size) BUG(); *************** *** 453,456 **** --- 469,479 ---- } + if (hash_table == free_space_hash) { + hash_table = total_free_space_hash; + continue; + } + + hash_table = free_space_hash; + /*** * We couldn't find a comp page with enough free *************** *** 464,468 **** if (!writeout_fragments(gfp_mask, priority)) ! goto give_up; if (unlikely(current->need_resched)) { --- 487,491 ---- if (!writeout_fragments(gfp_mask, priority)) ! goto out_release; if (unlikely(current->need_resched)) { *************** *** 472,476 **** if (TryLockPage(page)) ! goto give_up; if (page_count(page) - !!page->buffers == 3) --- 495,499 ---- if (TryLockPage(page)) ! goto out_release; if (page_count(page) - !!page->buffers == 3) *************** *** 481,489 **** UnlockPage(page); - give_up: - if (!dirty) - goto out_release; - __set_page_dirty(page); - ClearPageLaunder(page); out_release: page_cache_release(page); --- 504,507 ---- *************** *** 541,544 **** --- 559,563 ---- /* let's update some important fields */ comp_page->free_space -= compressed_size; + comp_page->total_free_space -= compressed_size; comp_page->free_offset += compressed_size; *************** *** 583,600 **** out: - if (dirty) { - CompFragmentSetDirty(fragment); - list_add(&fragment->mapping_list, &fragment->mapping->dirty_comp_pages); - - /* mark the inode as having dirty pages since the - * inode might have been synced in the meanwhile (if - * we slept to get a free comp cache entry above) */ - if (page->mapping->host) - mark_inode_dirty_pages(page->mapping->host); - } - else - list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages); - page->mapping->nrpages++; - /* we need to account the number of fragments that are from * swapper_space to correctly count the cached, swapcached --- 602,605 ---- *************** *** 625,629 **** comp_page = NULL; UnlockPage(page); ! goto give_up; } --- 630,634 ---- comp_page = NULL; UnlockPage(page); ! goto out_release; } |
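Taken together, the swapout.c hunks make get_comp_cache_page() search in two passes: first the free-space table, for a comp page whose contiguous free space already fits the new fragment, then the total-free-space table, for one that fits only after its freed fragments are merged by compact_fragments(). A condensed sketch of that order (find_space_sketch() is a hypothetical helper; the locking, retry limits and writeout fallback of the real code are omitted):

    static struct comp_cache_page *
    find_space_sketch(unsigned short compressed_size)
    {
            struct comp_cache_page * comp_page;

            /* 1) contiguous free space already large enough */
            comp_page = search_comp_page(free_space_hash, compressed_size);
            if (comp_page)
                    return comp_page;

            /* 2) total free space (free space + freed fragments waiting to
             *    be merged) is large enough: compact the page before reuse */
            comp_page = search_comp_page(total_free_space_hash, compressed_size);
            if (comp_page)
                    compact_fragments(comp_page);

            return comp_page;   /* NULL: caller writes fragments out and retries */
    }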
From: Rodrigo S. de C. <rc...@us...> - 2002-07-11 19:08:15
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv16722/include/linux Modified Files: comp_cache.h Log Message: Feature o New proc entry (comp_cache_frag), showing the fragmentation in the compressed cache. o Every struct comp_cache_struct is added to two hash tables: free space and total free space. The former is the old one, showing the amount of free space that can be used right away. The latter shows the total free space, ie it also accounts the fragmented space. Thus, if there is a page with total space enough for a new fragment, we compact this page and return it to be used by the new fragment. The two tables are set up the same way. o Added back feature removed in 0.23pre9 due to a bug fix. That feature would allow pages to be compressed even when the gfp_mask does not allow. Now it is back and it won't write out a page if the gfp_mask does not allow. This feature allows a better use of the compressed cache space. Cleanup o Removed dirty parameter from get_comp_cache_page(). o Better descriptions of /proc/comp_cache_{hist,frag}. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.88 retrieving revision 1.89 diff -C2 -r1.88 -r1.89 *** comp_cache.h 9 Jul 2002 13:15:23 -0000 1.88 --- comp_cache.h 11 Jul 2002 19:08:10 -0000 1.89 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-07-09 10:03:27 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-07-11 15:28:35 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 75,84 **** /* fields for compression structure */ unsigned short free_offset; ! short free_space; struct list_head fragments; ! struct comp_cache_page * next_hash; ! struct comp_cache_page ** pprev_hash; }; --- 75,95 ---- /* fields for compression structure */ unsigned short free_offset; ! ! /* free space that can used right away */ ! short free_space; ! ! /* total free space = free_space + fragmented space, ie the ! * sum of all Freed fragments waiting to be merged */ ! short total_free_space; struct list_head fragments; ! /* free space hash table */ ! struct comp_cache_page * next_hash_fs; ! struct comp_cache_page ** pprev_hash_fs; ! ! /* total free space hash table */ ! struct comp_cache_page * next_hash_tfs; ! struct comp_cache_page ** pprev_hash_tfs; }; *************** *** 339,344 **** void comp_cache_init(void); inline int init_comp_page(struct comp_cache_page **,struct page *); ! inline void compress_dirty_page(struct page *, int (*writepage)(struct page *), unsigned int, int); ! inline int compress_clean_page(struct page *, unsigned int, int); extern unsigned long comp_cache_free_space; --- 350,355 ---- void comp_cache_init(void); inline int init_comp_page(struct comp_cache_page **,struct page *); ! inline void compress_dirty_page(struct page *, int (*writepage)(struct page *), unsigned int, int); ! 
inline int compress_clean_page(struct page *, unsigned int, int); extern unsigned long comp_cache_free_space; *************** *** 486,489 **** --- 497,501 ---- } + void compact_fragments(struct comp_cache_page *); #else *************** *** 525,531 **** --- 537,548 ---- } + extern struct comp_cache_page ** free_space_hash; extern unsigned int free_space_hash_size; extern unsigned int free_space_interval; + extern struct comp_cache_page ** total_free_space_hash; + extern unsigned int total_free_space_hash_size; + extern unsigned int total_free_space_interval; + static inline int free_space_hashfn(int free_space) { *************** *** 542,546 **** int set_pte_list_to_entry(struct pte_list *, swp_entry_t, swp_entry_t); ! struct comp_cache_page * search_comp_page_free_space(int); struct comp_cache_fragment ** create_fragment_hash(unsigned long *, unsigned int *, unsigned int *); --- 559,563 ---- int set_pte_list_to_entry(struct pte_list *, swp_entry_t, swp_entry_t); ! struct comp_cache_page * search_comp_page(struct comp_cache_page **, int); struct comp_cache_fragment ** create_fragment_hash(unsigned long *, unsigned int *, unsigned int *); *************** *** 551,554 **** --- 568,574 ---- inline void remove_fragment_from_lru_queue(struct comp_cache_fragment *); + unsigned long free_space_count(int, unsigned long *); + unsigned long fragmentation_count(int, unsigned long *); + /* enough memory functions */ #ifdef CONFIG_COMP_CACHE *************** *** 564,570 **** int comp_cache_stat_read_proc(char *, char **, off_t, int, int *, void *); int comp_cache_hist_read_proc(char *, char **, off_t, int, int *, void *); inline void comp_cache_update_page_stats(struct page *, int); - unsigned long free_space_count(int, unsigned long *); - #endif /* _LINUX_COMP_CACHE_H */ --- 584,589 ---- int comp_cache_stat_read_proc(char *, char **, off_t, int, int *, void *); int comp_cache_hist_read_proc(char *, char **, off_t, int, int *, void *); + int comp_cache_frag_read_proc(char *, char **, off_t, int, int *, void *); inline void comp_cache_update_page_stats(struct page *, int); #endif /* _LINUX_COMP_CACHE_H */ |
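free_space_hashfn() is declared in this header but its body is not part of the checkin. Since aux.c sets free_space_interval to 100 bytes (for 4 KiB pages) and sizes both tables to PAGE_SIZE/free_space_interval + 2 buckets, one plausible definition is a bucket-per-interval mapping; this is an assumption for orientation only, not the project's code:

    /* Assumed definition, for illustration: bucket 0 holds pages with no
     * free space, bucket i holds pages with ((i-1)*interval, i*interval]
     * bytes free, matching the ranges printed by /proc/comp_cache_hist. */
    static inline int
    free_space_hashfn(int free_space)
    {
            if (!free_space)
                    return 0;
            return (free_space + free_space_interval - 1) / free_space_interval;
    }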
From: Rodrigo S. de C. <rc...@us...> - 2002-07-11 19:08:15
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv16722/mm Modified Files: vmscan.c Log Message: Feature o New proc entry (comp_cache_frag), showing the fragmentation in the compressed cache. o Every struct comp_cache_struct is added to two hash tables: free space and total free space. The former is the old one, showing the amount of free space that can be used right away. The latter shows the total free space, ie it also accounts the fragmented space. Thus, if there is a page with total space enough for a new fragment, we compact this page and return it to be used by the new fragment. The two tables are set up the same way. o Added back feature removed in 0.23pre9 due to a bug fix. That feature would allow pages to be compressed even when the gfp_mask does not allow. Now it is back and it won't write out a page if the gfp_mask does not allow. This feature allows a better use of the compressed cache space. Cleanup o Removed dirty parameter from get_comp_cache_page(). o Better descriptions of /proc/comp_cache_{hist,frag}. Index: vmscan.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v retrieving revision 1.39 retrieving revision 1.40 diff -C2 -r1.39 -r1.40 *** vmscan.c 5 Jul 2002 15:21:52 -0000 1.39 --- vmscan.c 11 Jul 2002 19:08:11 -0000 1.40 *************** *** 410,414 **** --- 410,418 ---- writepage = page->mapping->a_ops->writepage; + #ifdef CONFIG_COMP_CACHE + if (writepage) + #else if ((gfp_mask & __GFP_FS) && writepage) + #endif { ClearPageDirty(page); |
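The #ifdef above lets shrink_cache() hand a dirty page to the writeback path even when gfp_mask lacks __GFP_FS, because with compressed cache configured that test moves into compress_dirty_page() (see the mm/comp_cache/main.c hunks in this same checkin): if filesystem I/O is not allowed, the page is not written out and its dirty and launder bits are set back. A paraphrase of the split responsibility, not literal kernel code (may_writeback is a hypothetical name):

    /* vmscan.c side: with compressed cache, admit the page regardless of
     * __GFP_FS; otherwise keep the old gating. */
    #ifdef CONFIG_COMP_CACHE
            may_writeback = (writepage != NULL);
    #else
            may_writeback = ((gfp_mask & __GFP_FS) && writepage);
    #endif

    /* comp_cache/main.c side: compress_dirty_page() backs off on its own */
    if (!(gfp_mask & __GFP_FS)) {
            UnlockPage(page);
            SetPageDirty(page);             /* leave the page dirty for later */
            ClearPageLaunder(page);
            return;
    }
    writepage(page);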
From: Rodrigo S. de C. <rc...@us...> - 2002-07-11 19:08:15
Update of /cvsroot/linuxcompressed/linux/fs/proc In directory usw-pr-cvs1:/tmp/cvs-serv16722/fs/proc Modified Files: proc_misc.c Log Message: Feature o New proc entry (comp_cache_frag), showing the fragmentation in the compressed cache. o Every struct comp_cache_struct is added to two hash tables: free space and total free space. The former is the old one, showing the amount of free space that can be used right away. The latter shows the total free space, ie it also accounts the fragmented space. Thus, if there is a page with total space enough for a new fragment, we compact this page and return it to be used by the new fragment. The two tables are set up the same way. o Added back feature removed in 0.23pre9 due to a bug fix. That feature would allow pages to be compressed even when the gfp_mask does not allow. Now it is back and it won't write out a page if the gfp_mask does not allow. This feature allows a better use of the compressed cache space. Cleanup o Removed dirty parameter from get_comp_cache_page(). o Better descriptions of /proc/comp_cache_{hist,frag}. Index: proc_misc.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/fs/proc/proc_misc.c,v retrieving revision 1.5 retrieving revision 1.6 diff -C2 -r1.5 -r1.6 *** proc_misc.c 29 May 2002 21:28:54 -0000 1.5 --- proc_misc.c 11 Jul 2002 19:08:10 -0000 1.6 *************** *** 526,529 **** --- 526,530 ---- {"comp_cache_stat", comp_cache_stat_read_proc}, {"comp_cache_hist", comp_cache_hist_read_proc}, + {"comp_cache_frag", comp_cache_frag_read_proc}, #endif {"devices", devices_read_proc}, |
From: Rodrigo S. de C. <rc...@us...> - 2002-07-09 13:15:28
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv8282/include/linux Modified Files: comp_cache.h Log Message: Other o Change the number of reserved pages for swap buffer from 128 to 32. That improves performance since a huge number of swap buffer pages is unneeded and we end up freeing some memory that can be used more efficiently. o Removed the gfp_mask_buffer idea in refill_swp_buffer(). That variable was introduced because we wanted to first free the swap buffers that had its IO completely finished. Only in the case that doesn't happen that we used to try to free the buffers which needed to be synced. Unfortunally that turned out to be a bad idea since it could change the LRU order when freeing the fragments. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.87 retrieving revision 1.88 diff -C2 -r1.87 -r1.88 *** comp_cache.h 5 Jul 2002 15:21:49 -0000 1.87 --- comp_cache.h 9 Jul 2002 13:15:23 -0000 1.88 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-07-05 11:51:59 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-07-09 10:03:27 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 29,33 **** #include <linux/WKcommon.h> ! #define COMP_CACHE_VERSION "0.23pre9" /* maximum compressed size of a page */ --- 29,33 ---- #include <linux/WKcommon.h> ! #define COMP_CACHE_VERSION "0.23" /* maximum compressed size of a page */ *************** *** 177,181 **** #define INF 0xffffffff ! #define NUM_SWP_BUFFERS 128 /* do not change the fields order */ --- 177,181 ---- #define INF 0xffffffff ! #define NUM_SWP_BUFFERS 32 /* do not change the fields order */ |
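On i386 with 4 KiB pages this shrinks the reserved swap-buffer pool from 128 * 4 KiB = 512 KiB to 32 * 4 KiB = 128 KiB, returning 384 KiB to general use.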
From: Rodrigo S. de C. <rc...@us...> - 2002-07-09 13:15:28
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv8282/mm/comp_cache Modified Files: swapout.c Log Message: Other o Change the number of reserved pages for swap buffer from 128 to 32. That improves performance since a huge number of swap buffer pages is unneeded and we end up freeing some memory that can be used more efficiently. o Removed the gfp_mask_buffer idea in refill_swp_buffer(). That variable was introduced because we wanted to first free the swap buffers that had its IO completely finished. Only in the case that doesn't happen that we used to try to free the buffers which needed to be synced. Unfortunally that turned out to be a bad idea since it could change the LRU order when freeing the fragments. Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.56 retrieving revision 1.57 diff -C2 -r1.56 -r1.57 *** swapout.c 1 Jul 2002 21:36:50 -0000 1.56 --- swapout.c 9 Jul 2002 13:15:24 -0000 1.57 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-01 18:13:35 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-09 10:03:02 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 30,37 **** struct swp_buffer * swp_buffer; struct comp_cache_fragment * fragment; - unsigned int gfp_mask_buffer; int wait, maxscan; - gfp_mask_buffer = gfp_mask & ~__GFP_IO; maxscan = max(NUM_SWP_BUFFERS/priority, (int) (nrpages * 1.5)); wait = 0; --- 30,35 ---- *************** *** 60,64 **** if (buffer_page->buffers) { list_del_init(swp_buffer_lh); ! if (!try_to_free_buffers(buffer_page, gfp_mask_buffer)) { list_add(swp_buffer_lh, &swp_used_buffer_head); --- 58,62 ---- if (buffer_page->buffers) { list_del_init(swp_buffer_lh); ! if (!try_to_free_buffers(buffer_page, gfp_mask)) { list_add(swp_buffer_lh, &swp_used_buffer_head); *************** *** 103,112 **** } ! /* couldn't free any swap buffer? so let's perform IO, if ! * gfp_mask allows, to free the page buffers */ if (!wait && nrpages > 0) { wait = 1; maxscan = max(NUM_SWP_BUFFERS >> 4, (int) (nrpages * 4)); - gfp_mask_buffer = gfp_mask; if (unlikely(current->need_resched)) { __set_current_state(TASK_RUNNING); --- 101,109 ---- } ! /* couldn't free any swap buffer? so let's start waiting for ! * the lock from the locked pages */ if (!wait && nrpages > 0) { wait = 1; maxscan = max(NUM_SWP_BUFFERS >> 4, (int) (nrpages * 4)); if (unlikely(current->need_resched)) { __set_current_state(TASK_RUNNING); |
From: Rodrigo S. de C. <rc...@us...> - 2002-07-05 15:22:22
Update of /cvsroot/linuxcompressed/linux/fs/ncpfs In directory usw-pr-cvs1:/tmp/cvs-serv27959/fs/ncpfs Modified Files: dir.c Log Message: Bug fixes o Fixed potential oops in __read_cache_page() due to a misplaced steal_page_from_comp_cache() call. o Fixed bug in __read_swap_cache_async() which would end up getting an extra reference on a swap cache page and would also erroneously stop a swapin readahead. o Fixed major bug in shrink_cache() that could hang the machine. We could write a page in compress_dirty_page() even if the GFP mask does not allow! Cleanups o Added #ifdefs to some flush_comp_cache() calls. o Replaced find_comp_page() in do_swap_page() by a in_comp_cache() call. Other o Added page count check to shrink_comp_cache(). Only to make sure the page is going to be actually freed. o Removed write permission from /proc/sys/vm/comp_cache/size when resize on demand option is enabled. Index: dir.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/fs/ncpfs/dir.c,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -r1.3 -r1.4 *** dir.c 21 Mar 2002 19:24:17 -0000 1.3 --- dir.c 5 Jul 2002 15:21:49 -0000 1.4 *************** *** 452,456 **** --- 452,458 ---- goto invalid_cache; ctl.cache = kmap(ctl.page); + #ifdef CONFIG_COMP_PAGE_CACHE flush_comp_cache(ctl.page); + #endif if (!Page_Uptodate(ctl.page)) goto invalid_cache; |
From: Rodrigo S. de C. <rc...@us...> - 2002-07-05 15:22:22
Update of /cvsroot/linuxcompressed/linux/fs/smbfs In directory usw-pr-cvs1:/tmp/cvs-serv27959/fs/smbfs Modified Files: dir.c Log Message: Bug fixes o Fixed potential oops in __read_cache_page() due to a misplaced steal_page_from_comp_cache() call. o Fixed bug in __read_swap_cache_async() which would end up getting an extra reference on a swap cache page and would also erroneously stop a swapin readahead. o Fixed major bug in shrink_cache() that could hang the machine. We could write a page in compress_dirty_page() even if the GFP mask does not allow! Cleanups o Added #ifdefs to some flush_comp_cache() calls. o Replaced find_comp_page() in do_swap_page() by a in_comp_cache() call. Other o Added page count check to shrink_comp_cache(). Only to make sure the page is going to be actually freed. o Removed write permission from /proc/sys/vm/comp_cache/size when resize on demand option is enabled. Index: dir.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/fs/smbfs/dir.c,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -r1.3 -r1.4 *** dir.c 21 Mar 2002 19:24:17 -0000 1.3 --- dir.c 5 Jul 2002 15:21:49 -0000 1.4 *************** *** 139,143 **** --- 139,145 ---- goto invalid_cache; ctl.cache = kmap(ctl.page); + #ifdef CONFIG_COMP_PAGE_CACHE flush_comp_cache(ctl.page); + #endif if (!Page_Uptodate(ctl.page)) goto invalid_cache; |
From: Rodrigo S. de C. <rc...@us...> - 2002-07-05 15:22:22
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv27959/include/linux Modified Files: comp_cache.h Log Message: Bug fixes o Fixed potential oops in __read_cache_page() due to a misplaced steal_page_from_comp_cache() call. o Fixed bug in __read_swap_cache_async() which would end up getting an extra reference on a swap cache page and would also erroneously stop a swapin readahead. o Fixed major bug in shrink_cache() that could hang the machine. We could write a page in compress_dirty_page() even if the GFP mask does not allow! Cleanups o Added #ifdefs to some flush_comp_cache() calls. o Replaced find_comp_page() in do_swap_page() by a in_comp_cache() call. Other o Added page count check to shrink_comp_cache(). Only to make sure the page is going to be actually freed. o Removed write permission from /proc/sys/vm/comp_cache/size when resize on demand option is enabled. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.86 retrieving revision 1.87 diff -C2 -r1.86 -r1.87 *** comp_cache.h 1 Jul 2002 18:44:40 -0000 1.86 --- comp_cache.h 5 Jul 2002 15:21:49 -0000 1.87 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-07-01 15:29:46 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-07-05 11:51:59 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 29,33 **** #include <linux/WKcommon.h> ! #define COMP_CACHE_VERSION "0.23pre8" /* maximum compressed size of a page */ --- 29,33 ---- #include <linux/WKcommon.h> ! #define COMP_CACHE_VERSION "0.23pre9" /* maximum compressed size of a page */ *************** *** 553,558 **** /* enough memory functions */ #ifdef CONFIG_COMP_CACHE - inline int in_comp_cache(struct address_space *, unsigned long); extern int FASTCALL(find_comp_page(struct address_space *, unsigned long, struct comp_cache_fragment **)); #else static inline int find_comp_page(struct address_space * mapping, unsigned long offset, struct comp_cache_fragment ** fragment) { return -ENOENT; } --- 553,558 ---- /* enough memory functions */ #ifdef CONFIG_COMP_CACHE extern int FASTCALL(find_comp_page(struct address_space *, unsigned long, struct comp_cache_fragment **)); + inline int in_comp_cache(struct address_space *, unsigned long); #else static inline int find_comp_page(struct address_space * mapping, unsigned long offset, struct comp_cache_fragment ** fragment) { return -ENOENT; } |
From: Rodrigo S. de C. <rc...@us...> - 2002-07-05 15:21:58
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv27959/mm/comp_cache Modified Files: adaptivity.c proc.c Log Message: Bug fixes o Fixed potential oops in __read_cache_page() due to a misplaced steal_page_from_comp_cache() call. o Fixed bug in __read_swap_cache_async() which would end up getting an extra reference on a swap cache page and would also erroneously stop a swapin readahead. o Fixed major bug in shrink_cache() that could hang the machine. We could write a page in compress_dirty_page() even if the GFP mask does not allow! Cleanups o Added #ifdefs to some flush_comp_cache() calls. o Replaced find_comp_page() in do_swap_page() by a in_comp_cache() call. Other o Added page count check to shrink_comp_cache(). Only to make sure the page is going to be actually freed. o Removed write permission from /proc/sys/vm/comp_cache/size when resize on demand option is enabled. Index: adaptivity.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v retrieving revision 1.32 retrieving revision 1.33 diff -C2 -r1.32 -r1.33 *** adaptivity.c 1 Jul 2002 18:44:40 -0000 1.32 --- adaptivity.c 5 Jul 2002 15:21:54 -0000 1.33 *************** *** 2,6 **** * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-07-01 15:19:18 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-07-04 13:59:28 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 525,528 **** --- 525,530 ---- shrink: remove_comp_page_from_hash_table(empty_comp_page); + if (page_count(empty_comp_page->page) != 1) + BUG(); UnlockPage(empty_comp_page->page); page_cache_release(empty_comp_page->page); Index: proc.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/proc.c,v retrieving revision 1.16 retrieving revision 1.17 diff -C2 -r1.16 -r1.17 *** proc.c 19 Jun 2002 12:18:44 -0000 1.16 --- proc.c 5 Jul 2002 15:21:55 -0000 1.17 *************** *** 2,6 **** * linux/mm/comp_cache/proc.c * ! * Time-stamp: <2002-06-19 08:59:17 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/proc.c * ! * Time-stamp: <2002-07-05 10:01:47 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 55,59 **** ctl_table comp_cache_table[] = { ! {CC_SIZE, "size", &new_num_comp_pages, sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec, NULL, &min_num_comp_pages, &max_num_comp_pages}, --- 55,65 ---- ctl_table comp_cache_table[] = { ! {CC_SIZE, "size", &new_num_comp_pages, sizeof(int), ! #ifdef CONFIG_COMP_DEMAND_RESIZE ! 0444, ! #else ! 0644, ! #endif ! NULL, &proc_dointvec_minmax, &sysctl_intvec, NULL, &min_num_comp_pages, &max_num_comp_pages}, |
From: Rodrigo S. de C. <rc...@us...> - 2002-07-05 15:21:58
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv27959/mm Modified Files: filemap.c memory.c shmem.c swap_state.c vmscan.c Log Message: Bug fixes o Fixed potential oops in __read_cache_page() due to a misplaced steal_page_from_comp_cache() call. o Fixed bug in __read_swap_cache_async() which would end up getting an extra reference on a swap cache page and would also erroneously stop a swapin readahead. o Fixed major bug in shrink_cache() that could hang the machine. We could write a page in compress_dirty_page() even if the GFP mask does not allow! Cleanups o Added #ifdefs to some flush_comp_cache() calls. o Replaced find_comp_page() in do_swap_page() by a in_comp_cache() call. Other o Added page count check to shrink_comp_cache(). Only to make sure the page is going to be actually freed. o Removed write permission from /proc/sys/vm/comp_cache/size when resize on demand option is enabled. Index: filemap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v retrieving revision 1.31 retrieving revision 1.32 diff -C2 -r1.31 -r1.32 *** filemap.c 1 Jul 2002 17:37:29 -0000 1.31 --- filemap.c 5 Jul 2002 15:21:49 -0000 1.32 *************** *** 164,171 **** #ifdef CONFIG_COMP_CACHE if (PageTestandClearCompCache(page)) { ! if (PageMappedCompCache(page)) steal_page_from_comp_cache(page, NULL); ! else ! invalidate_comp_cache(mapping, page->index); } #endif --- 164,172 ---- #ifdef CONFIG_COMP_CACHE if (PageTestandClearCompCache(page)) { ! if (PageMappedCompCache(page)) { steal_page_from_comp_cache(page, NULL); ! return; ! } ! invalidate_comp_cache(mapping, page->index); } #endif *************** *** 1107,1115 **** } #ifdef CONFIG_COMP_PAGE_CACHE - /* - * Invalidate compressed cache entry since it may become - * obsolete. The caller(s) will probably overwrite the data - * from this page, without dirtying the page. - */ if (page) flush_comp_cache(page); --- 1108,1111 ---- *************** *** 2946,2953 **** if (cached_page) page_cache_release(cached_page); - steal_page_from_comp_cache(page, NULL); #ifdef CONFIG_COMP_PAGE_CACHE ! if (page) flush_comp_cache(page); #endif return page; --- 2942,2950 ---- if (cached_page) page_cache_release(cached_page); #ifdef CONFIG_COMP_PAGE_CACHE ! if (page) { ! steal_page_from_comp_cache(page, NULL); flush_comp_cache(page); + } #endif return page; *************** *** 3011,3018 **** } #ifdef CONFIG_COMP_PAGE_CACHE - /* - * we have to invalidate the page since the caller function - * (generic_file_write) will overwrite the data in the page - */ flush_comp_cache(page); #endif --- 3008,3011 ---- Index: memory.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v retrieving revision 1.32 retrieving revision 1.33 diff -C2 -r1.32 -r1.33 *** memory.c 1 Jul 2002 17:37:29 -0000 1.32 --- memory.c 5 Jul 2002 15:21:49 -0000 1.33 *************** *** 1133,1139 **** page = lookup_swap_cache(entry); if (!page) { - struct comp_cache_fragment * fragment; /* perform readahead only if the page is on disk */ ! if (find_comp_page(&swapper_space, entry.val, &fragment)) { swapin_readahead(entry); /* major fault */ --- 1133,1138 ---- page = lookup_swap_cache(entry); if (!page) { /* perform readahead only if the page is on disk */ ! 
if (!in_comp_cache(&swapper_space, entry.val)) { swapin_readahead(entry); /* major fault */ *************** *** 1310,1314 **** --- 1309,1315 ---- if (write_access) { entry = pte_mkwrite(pte_mkdirty(entry)); + #ifdef CONFIG_COMP_PAGE_CACHE flush_comp_cache(new_page); + #endif } set_pte(page_table, entry); Index: shmem.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/shmem.c,v retrieving revision 1.20 retrieving revision 1.21 diff -C2 -r1.20 -r1.21 *** shmem.c 21 Mar 2002 19:24:17 -0000 1.20 --- shmem.c 5 Jul 2002 15:21:50 -0000 1.21 *************** *** 838,843 **** if (IS_ERR(page)) break; ! flush_comp_cache(page); /* We have exclusive IO access to the page.. */ --- 838,844 ---- if (IS_ERR(page)) break; ! #ifdef CONFIG_COMP_PAGE_CACHE flush_comp_cache(page); + #endif /* We have exclusive IO access to the page.. */ Index: swap_state.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v retrieving revision 1.34 retrieving revision 1.35 diff -C2 -r1.34 -r1.35 *** swap_state.c 1 Jul 2002 21:36:50 -0000 1.34 --- swap_state.c 5 Jul 2002 15:21:50 -0000 1.35 *************** *** 134,138 **** __delete_from_swap_cache(page); spin_unlock(&pagecache_lock); ! swap_free(entry); page_cache_release(page); --- 134,138 ---- __delete_from_swap_cache(page); spin_unlock(&pagecache_lock); ! swap_free(entry); page_cache_release(page); *************** *** 221,227 **** if (readahead) { ! struct page * tmp_page = find_get_page(&swapper_space, entry.val); ! if (tmp_page) break; if (in_comp_cache(&swapper_space, entry.val)) return new_page; --- 221,229 ---- if (readahead) { ! found_page = find_get_page(&swapper_space, entry.val); ! if (found_page) { ! steal_page_from_comp_cache(found_page, new_page); break; + } if (in_comp_cache(&swapper_space, entry.val)) return new_page; *************** *** 247,251 **** } } while (err != -ENOENT); ! if (new_page) page_cache_release(new_page); --- 249,253 ---- } } while (err != -ENOENT); ! if (new_page) page_cache_release(new_page); Index: vmscan.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v retrieving revision 1.38 retrieving revision 1.39 diff -C2 -r1.38 -r1.39 *** vmscan.c 1 Jul 2002 18:44:40 -0000 1.38 --- vmscan.c 5 Jul 2002 15:21:52 -0000 1.39 *************** *** 410,418 **** writepage = page->mapping->a_ops->writepage; - #ifdef CONFIG_COMP_CACHE - if (writepage) - #else if ((gfp_mask & __GFP_FS) && writepage) - #endif { ClearPageDirty(page); --- 410,414 ---- *************** *** 475,479 **** UnlockPage(page); page_cache_release(page); ! spin_lock(&pagemap_lru_lock); continue; --- 471,475 ---- UnlockPage(page); page_cache_release(page); ! spin_lock(&pagemap_lru_lock); continue; |
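The do_swap_page() cleanup swaps a full find_comp_page() lookup, which returns the fragment, for a plain existence test. in_comp_cache() is only declared in the headers touched by this checkin; a thin wrapper along these lines would match how it is used (a sketch that assumes find_comp_page() returns 0 on success and -ENOENT otherwise, as its CONFIG_COMP_CACHE=n stub does):

    /* Sketch, not the project's actual definition of in_comp_cache(). */
    inline int
    in_comp_cache(struct address_space * mapping, unsigned long offset)
    {
            struct comp_cache_fragment * fragment;

            return find_comp_page(mapping, offset, &fragment) == 0;
    }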
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 21:36:54
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv18087/mm/comp_cache Modified Files: aux.c main.c swapout.c Log Message: Features o Added boot info for the max size of comp cache Bug fixes o Fixed bug introduced in lru queue handling functions cleanup. Only vswap addresses were affected. o Fixed another bug introduced in a cleanup. Again, only vswap addresses could hit this bug. Cleanups o Now we access Freed bit. This kind of fragment (Freed fragments) were referenced by !fragment->mapping. Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.34 retrieving revision 1.35 diff -C2 -r1.34 -r1.35 *** aux.c 1 Jul 2002 18:16:59 -0000 1.34 --- aux.c 1 Jul 2002 21:36:50 -0000 1.35 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-07-01 15:14:12 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-07-01 18:04:55 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 336,340 **** #ifdef CONFIG_COMP_PAGE_CACHE ! if (PageSwapCache(fragment)) { list_add(&(fragment->lru_queue), &lru_queue); return; --- 336,340 ---- #ifdef CONFIG_COMP_PAGE_CACHE ! if (!PageSwapCache(fragment)) { list_add(&(fragment->lru_queue), &lru_queue); return; *************** *** 356,360 **** #ifdef CONFIG_COMP_PAGE_CACHE ! if (PageSwapCache(fragment)) { list_del_init(&(fragment->lru_queue)); return; --- 356,360 ---- #ifdef CONFIG_COMP_PAGE_CACHE ! if (!PageSwapCache(fragment)) { list_del_init(&(fragment->lru_queue)); return; Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.50 retrieving revision 1.51 diff -C2 -r1.50 -r1.51 *** main.c 1 Jul 2002 17:37:29 -0000 1.50 --- main.c 1 Jul 2002 21:36:50 -0000 1.51 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-07-01 13:12:44 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-07-01 15:45:58 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 306,310 **** printk("Compressed Cache: %s\n", COMP_CACHE_VERSION); printk("Compressed Cache: initial size\n" ! "Compressed Cache: %lu pages = %luKiB\n", init_num_comp_pages, (init_num_comp_pages * PAGE_SIZE)/1024); /* fiz zone watermarks */ --- 306,314 ---- printk("Compressed Cache: %s\n", COMP_CACHE_VERSION); printk("Compressed Cache: initial size\n" ! "Compressed Cache: %lu pages = %luKiB\n" ! "Compressed Cache: maximum size\n" ! "Compressed Cache: %lu pages = %luKiB\n", ! init_num_comp_pages, (init_num_comp_pages * PAGE_SIZE)/1024, ! max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* fiz zone watermarks */ Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.55 retrieving revision 1.56 diff -C2 -r1.55 -r1.56 *** swapout.c 1 Jul 2002 18:16:59 -0000 1.55 --- swapout.c 1 Jul 2002 21:36:50 -0000 1.56 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-01 15:14:39 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-01 18:13:35 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 69,76 **** } ! 
/* has the fragment we are swapping out been already * freed? Given that we were on IO process, * comp_cache_free() didn't free the fragment struct, ! * so let's do it now */ if (!CompFragmentTestandClearIO(fragment)) { kmem_cache_free(fragment_cachep, (fragment)); --- 69,78 ---- } ! /*** ! * Has the fragment we are swapping out been already * freed? Given that we were on IO process, * comp_cache_free() didn't free the fragment struct, ! * so let's do it now. ! */ if (!CompFragmentTestandClearIO(fragment)) { kmem_cache_free(fragment_cachep, (fragment)); *************** *** 85,89 **** * comp_cache_free() */ ! if (!fragment->mapping) //!CompFragmentFreed(fragment)) goto out; --- 87,91 ---- * comp_cache_free() */ ! if (CompFragmentFreed(fragment)) goto out; |
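[Editor's note] The aux.c hunks above flip a PageSwapCache() test that the earlier cleanup had inverted, so only swap-cache fragments at virtual swap (vswap) addresses are kept off the LRU queue. A simplified standalone model of that rule (not the project's code; the struct, the vswap test and the threshold below are invented):

#include <stdbool.h>
#include <stdio.h>

struct model_fragment {
    bool swap_cache;        /* PageSwapCache(fragment) in the real code */
    unsigned long index;    /* swap entry value when swap_cache is true */
};

static bool model_vswap_address(unsigned long index)
{
    return index >= 0x80000000UL;   /* placeholder for vswap_address() */
}

static bool belongs_on_lru(const struct model_fragment *f)
{
    if (!f->swap_cache)
        return true;                /* page-cache fragments are always queued */
    return !model_vswap_address(f->index);
}

int main(void)
{
    struct model_fragment a = { .swap_cache = false, .index = 1 };
    struct model_fragment b = { .swap_cache = true,  .index = 0x80000001UL };
    printf("page-cache fragment queued: %d\n", belongs_on_lru(&a)); /* 1 */
    printf("vswap fragment queued:      %d\n", belongs_on_lru(&b)); /* 0 */
    return 0;
}

With the test inverted, swap-cache fragments skipped the vswap check entirely, so fragments at vswap addresses could land on the queue; as the log message notes, only vswap addresses were affected.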
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 21:36:54
|
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv18087/mm Modified Files: swap_state.c Log Message: Features o Added boot info for the max size of comp cache Bug fixes o Fixed bug introduced in lru queue handling functions cleanup. Only vswap addresses were affected. o Fixed another bug introduced in a cleanup. Again, only vswap addresses could hit this bug. Cleanups o Now we access Freed bit. This kind of fragment (Freed fragments) were referenced by !fragment->mapping. Index: swap_state.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v retrieving revision 1.33 retrieving revision 1.34 diff -C2 -r1.33 -r1.34 *** swap_state.c 1 Jul 2002 17:50:37 -0000 1.33 --- swap_state.c 1 Jul 2002 21:36:50 -0000 1.34 *************** *** 239,246 **** err = add_to_swap_cache(new_page, entry); if (!err) { ! if (readahead || read_comp_cache(&swapper_space, entry.val, new_page)) rw_swap_page(READ, new_page); ! if (vswap_address(entry)) ! BUG(); return new_page; } --- 239,247 ---- err = add_to_swap_cache(new_page, entry); if (!err) { ! if (readahead || read_comp_cache(&swapper_space, entry.val, new_page)) { ! if (vswap_address(entry)) ! BUG(); rw_swap_page(READ, new_page); ! } return new_page; } |
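[Editor's note] The swap_state.c hunk above moves the vswap_address() sanity check inside the branch that actually issues the disk read, which only runs on a readahead or on a compressed-cache miss; a vswap entry has no on-disk slot, so it must never reach rw_swap_page(). A standalone sketch of that ordering (userspace model, not kernel code; the function and its parameters are invented):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static void model_swapin(bool readahead, bool comp_cache_hit, bool vswap)
{
    if (readahead || !comp_cache_hit) {
        assert(!vswap);          /* the kernel code BUG()s here instead */
        printf("issue disk read\n");
        return;
    }
    printf("served from compressed cache\n");
}

int main(void)
{
    model_swapin(false, true, true);    /* vswap entry, satisfied from cache */
    model_swapin(true, false, false);   /* readahead goes to disk */
    return 0;
}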
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 18:44:43
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv17101/mm/comp_cache Modified Files: adaptivity.c Log Message: Bug fix o Fixed bug that wouldn't allow the code to be compiled without compressed cache enabled. Index: adaptivity.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v retrieving revision 1.31 retrieving revision 1.32 diff -C2 -r1.31 -r1.32 *** adaptivity.c 1 Jul 2002 18:16:59 -0000 1.31 --- adaptivity.c 1 Jul 2002 18:44:40 -0000 1.32 *************** *** 2,6 **** * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-07-01 14:54:23 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-07-01 15:19:18 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 621,630 **** int ! grow_comp_cache(int nr_pages) { struct comp_cache_page * comp_page; struct page * page; ! while (comp_cache_needs_to_grow() && nr_pages--) { page = alloc_page(GFP_ATOMIC); --- 621,630 ---- int ! grow_comp_cache(int nrpages) { struct comp_cache_page * comp_page; struct page * page; ! while (comp_cache_needs_to_grow() && nrpages--) { page = alloc_page(GFP_ATOMIC); |
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 18:44:43
|
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv17101/include/linux Modified Files: comp_cache.h Log Message: Bug fix o Fixed bug that wouldn't allow the code to be compiled without compressed cache enabled. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.85 retrieving revision 1.86 diff -C2 -r1.85 -r1.86 *** comp_cache.h 1 Jul 2002 17:37:28 -0000 1.85 --- comp_cache.h 1 Jul 2002 18:44:40 -0000 1.86 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-07-01 13:09:35 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-07-01 15:29:46 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 104,108 **** #else static inline int shrink_comp_cache(struct comp_cache_page * comp_page, int check_further) { return 0; } ! static inline void grow_comp_cache(zone_t * zone, int nr_pages) { } #endif --- 104,108 ---- #else static inline int shrink_comp_cache(struct comp_cache_page * comp_page, int check_further) { return 0; } ! static inline void grow_comp_cache(int nrpages) { } #endif *************** *** 352,363 **** #ifdef CONFIG_COMP_PAGE_CACHE void steal_page_from_comp_cache(struct page *, struct page *); - #ifndef CONFIG_COMP_DEMAND_RESIZE - int comp_cache_try_to_release_page(struct page **, int, int); #else ! static inline int comp_cache_try_to_release_page(struct page ** page, int gfp_mask, int priority) { return try_to_release_page(*page, gfp_mask); } #endif #else static inline int comp_cache_try_to_release_page(struct page ** page, int gfp_mask, int priority) { return try_to_release_page(*page, gfp_mask); } - static inline void steal_page_from_comp_cache(struct page * page, struct page * new_page) {}; #endif --- 352,363 ---- #ifdef CONFIG_COMP_PAGE_CACHE void steal_page_from_comp_cache(struct page *, struct page *); #else ! static inline void steal_page_from_comp_cache(struct page * page, struct page * new_page) {}; #endif + + #if defined(CONFIG_COMP_PAGE_CACHE) && !defined(CONFIG_COMP_DEMAND_RESIZE) + int comp_cache_try_to_release_page(struct page **, int, int); #else static inline int comp_cache_try_to_release_page(struct page ** page, int gfp_mask, int priority) { return try_to_release_page(*page, gfp_mask); } #endif *************** *** 435,438 **** --- 435,441 ---- static inline int comp_cache_available_space(void) { return 0; } + + static inline void set_vswap_allocating(swp_entry_t entry) { }; + static inline void clear_vswap_allocating(swp_entry_t entry) { }; static inline void add_pte_vswap(pte_t * ptep, swp_entry_t entry) {}; |
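[Editor's note] Most of the comp_cache.h changes above make the compiled-out stubs match the real declarations (grow_comp_cache() had kept an old zone_t parameter, and the vswap-allocating helpers had no stubs at all), which is what lets the kernel build again without compressed cache. A minimal illustration of the pattern, assuming an invented CONFIG_* symbol and function name (not the project's header):

/* #define MODEL_CONFIG_COMP_CACHE 1 */

#include <stdio.h>

#ifdef MODEL_CONFIG_COMP_CACHE
void model_grow_comp_cache(int nrpages);   /* real implementation elsewhere */
#else
/* Stub with the *same* signature, so call sites build either way. */
static inline void model_grow_comp_cache(int nrpages) { (void)nrpages; }
#endif

int main(void)
{
    model_grow_comp_cache(4);   /* a no-op when the feature is compiled out */
    printf("done\n");
    return 0;
}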
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 18:44:43
|
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv17101/mm Modified Files: vmscan.c Log Message: Bug fix o Fixed bug that wouldn't allow the code to be compiled without compressed cache enabled. Index: vmscan.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v retrieving revision 1.37 retrieving revision 1.38 diff -C2 -r1.37 -r1.38 *** vmscan.c 1 Jul 2002 17:37:29 -0000 1.37 --- vmscan.c 1 Jul 2002 18:44:40 -0000 1.38 *************** *** 630,634 **** nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages); if (nr_pages <= 0) { ! #ifndef CONFIG_COMP_DEMAND_RESIZE grow_comp_cache(SWAP_CLUSTER_MAX/2); #endif --- 630,634 ---- nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages); if (nr_pages <= 0) { ! #if defined(CONFIG_COMP_CACHE) && !defined(CONFIG_COMP_DEMAND_RESIZE) grow_comp_cache(SWAP_CLUSTER_MAX/2); #endif |
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 18:17:02
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv7779/mm/comp_cache Modified Files: adaptivity.c aux.c minilzo.c swapout.c Log Message: Cleanups o Changes to remove some compiler warnings. Index: adaptivity.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v retrieving revision 1.30 retrieving revision 1.31 diff -C2 -r1.30 -r1.31 *** adaptivity.c 27 Jun 2002 13:17:37 -0000 1.30 --- adaptivity.c 1 Jul 2002 18:16:59 -0000 1.31 *************** *** 2,6 **** * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-06-27 09:32:53 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-07-01 14:54:23 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 82,86 **** extern kmem_cache_t * vswap_cachep; extern unsigned long nr_free_vswap; - static int wait_scan = 0; /*** --- 82,85 ---- Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.33 retrieving revision 1.34 diff -C2 -r1.33 -r1.34 *** aux.c 1 Jul 2002 17:37:29 -0000 1.33 --- aux.c 1 Jul 2002 18:16:59 -0000 1.34 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-07-01 13:09:27 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-07-01 15:14:12 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 315,327 **** BUG(); ! if (!PageSwapCache(fragment)) ! goto add; ! entry.val = fragment->index; - if (vswap_address(entry)) return; - - add: list_add_tail(&(fragment->lru_queue), &lru_queue); } --- 315,328 ---- BUG(); ! #ifdef CONFIG_COMP_PAGE_CACHE ! if (!PageSwapCache(fragment)) { ! list_add_tail(&(fragment->lru_queue), &lru_queue); ! return; ! } ! #endif ! /* swap cache page */ entry.val = fragment->index; if (vswap_address(entry)) return; list_add_tail(&(fragment->lru_queue), &lru_queue); } *************** *** 335,348 **** #ifdef CONFIG_COMP_PAGE_CACHE ! if (!PageSwapCache(fragment)) ! goto add; #endif ! ! entry.val = fragment->index; ! if (vswap_address(entry)) return; - - add: list_add(&(fragment->lru_queue), &lru_queue); } --- 336,348 ---- #ifdef CONFIG_COMP_PAGE_CACHE ! if (PageSwapCache(fragment)) { ! list_add(&(fragment->lru_queue), &lru_queue); ! return; ! } #endif ! /* swap cache page */ ! entry.val = fragment->index; if (vswap_address(entry)) return; list_add(&(fragment->lru_queue), &lru_queue); } *************** *** 356,369 **** #ifdef CONFIG_COMP_PAGE_CACHE ! if (!PageSwapCache(fragment)) ! goto remove; #endif ! entry.val = fragment->index; - if (vswap_address(entry)) return; - - remove: list_del_init(&(fragment->lru_queue)); } --- 356,368 ---- #ifdef CONFIG_COMP_PAGE_CACHE ! if (PageSwapCache(fragment)) { ! list_del_init(&(fragment->lru_queue)); ! return; ! } #endif ! 
/* swap cache page */ entry.val = fragment->index; if (vswap_address(entry)) return; list_del_init(&(fragment->lru_queue)); } Index: minilzo.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/minilzo.c,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -r1.1 -r1.2 *** minilzo.c 29 May 2002 21:28:55 -0000 1.1 --- minilzo.c 1 Jul 2002 18:16:59 -0000 1.2 *************** *** 76,82 **** # include <stddef.h> # include <linux/string.h> - # if !defined(NO_STDLIB_H) - # include <stdlib.h> - # endif # define HAVE_MEMCMP # define HAVE_MEMCPY --- 76,79 ---- Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.54 retrieving revision 1.55 diff -C2 -r1.54 -r1.55 *** swapout.c 1 Jul 2002 17:37:30 -0000 1.54 --- swapout.c 1 Jul 2002 18:16:59 -0000 1.55 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-01 14:22:54 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-01 15:14:39 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 213,217 **** writeout_fragments(unsigned int gfp_mask, int priority) { int (*writepage)(struct page *); ! struct list_head * fragment_lh, * tmp_lh; int maxscan, nrpages, swap_cache_page; struct comp_cache_fragment * fragment; --- 213,217 ---- writeout_fragments(unsigned int gfp_mask, int priority) { int (*writepage)(struct page *); ! struct list_head * fragment_lh; int maxscan, nrpages, swap_cache_page; struct comp_cache_fragment * fragment; *************** *** 310,323 **** #if 0 ! /* That will move all the fragments in this comp page ! * to the beginning of lru queue to be swapped out as ! * soon as possible. This police may improve comp ! * pages usage (fragment distribution). */ ! for_each_fragment(tmp_lh, fragment->comp_page) { ! if (tmp_lh != fragment_lh) { ! struct comp_cache_fragment * tmp = list_entry(tmp_lh, struct comp_cache_fragment, list); ! if (!list_empty(&(tmp->lru_queue))) { ! remove_fragment_from_lru_queue(tmp); ! add_fragment_to_lru_queue_tail(tmp); } } --- 310,328 ---- #if 0 ! { ! struct list_head * tmp_lh; ! ! /* That will move all the fragments in this comp page ! * to the beginning of lru queue to be swapped out as ! * soon as possible. This police may improve comp ! * pages usage (fragment distribution). */ ! for_each_fragment(tmp_lh, fragment->comp_page) { ! if (tmp_lh != fragment_lh) { ! struct comp_cache_fragment * tmp; ! tmp = list_entry(tmp_lh, struct comp_cache_fragment, list); ! if (!list_empty(&(tmp->lru_queue))) { ! remove_fragment_from_lru_queue(tmp); ! add_fragment_to_lru_queue_tail(tmp); ! } } } |
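[Editor's note] One of the warning cleanups above moves tmp_lh into the "#if 0" block in writeout_fragments(), so builds that leave the block disabled no longer declare an unused variable. The same trick in miniature (illustrative only, not the project's code):

#include <stdio.h>

static void scan_fragments(void)
{
#if 0
    {
        int debug_passes = 0;          /* exists only when the block is enabled */
        printf("debug pass %d\n", debug_passes);
    }
#endif
    printf("normal scan\n");           /* no unused-variable warning either way */
}

int main(void)
{
    scan_fragments();
    return 0;
}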
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 17:50:43
|
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv30746/mm Modified Files: swap_state.c Log Message: Bug fix o Compile error fix. Index: swap_state.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v retrieving revision 1.32 retrieving revision 1.33 diff -C2 -r1.32 -r1.33 *** swap_state.c 1 Jul 2002 17:37:29 -0000 1.32 --- swap_state.c 1 Jul 2002 17:50:37 -0000 1.33 *************** *** 221,228 **** if (readahead) { ! struct page * tmp_page = find_page_nolock(&swapper_space, entry.val); if (tmp_page) break; ! if (in_comp_cache(mapping, offset)) return new_page; } --- 221,228 ---- if (readahead) { ! struct page * tmp_page = find_get_page(&swapper_space, entry.val); if (tmp_page) break; ! if (in_comp_cache(&swapper_space, entry.val)) return new_page; } |
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 17:37:33
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv26297/mm/comp_cache Modified Files: aux.c main.c swapin.c swapout.c Log Message: Features o Some compressed cache functions (in particular the swap out ones) have priority parameter now, which will passed from VM main functions. The priority will show how far we should go on scanning lists to free spaces in compressed cache. o Fragments will not be decompressed when they are read from a read ahead (for swap and page cache). In this case, we only check if the fragment is in compressed cache. This police avoids LRU order to be changed. o Fragments will be removed from compressed cache if resize on demand is enabled. o Support for pages with buffers will only happen if resize on demand is disabled. Bug fixes o Fixed potential bug in __find_lock_page() where we would decompress a fragment and add a new page to page cache even if it had been just swapped in. Cleanups o Added #ifdefs in lru queues functions. o Several small cleanups Other o compress_page() only returns the page in locked state if it has been compressed. Otherwise, it will returned always unlocked. o now get_comp_cache_page() doesn't try so hard to get an entry. The maximum number of tries (which includes calls to writeout_fragments()) is 3. Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.32 retrieving revision 1.33 diff -C2 -r1.32 -r1.33 *** aux.c 19 Jun 2002 12:18:44 -0000 1.32 --- aux.c 1 Jul 2002 17:37:29 -0000 1.33 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-06-19 08:45:54 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-07-01 13:09:27 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 334,339 **** --- 334,341 ---- BUG(); + #ifdef CONFIG_COMP_PAGE_CACHE if (!PageSwapCache(fragment)) goto add; + #endif entry.val = fragment->index; *************** *** 353,358 **** --- 355,362 ---- BUG(); + #ifdef CONFIG_COMP_PAGE_CACHE if (!PageSwapCache(fragment)) goto remove; + #endif entry.val = fragment->index; *************** *** 367,371 **** /* adapted version of __find_page_nolock:filemap.c */ int FASTCALL(find_comp_page(struct address_space *, unsigned long, struct comp_cache_fragment **)); ! int find_comp_page(struct address_space *mapping, unsigned long offset, struct comp_cache_fragment ** fragment) { struct comp_cache_fragment * fhash; --- 371,375 ---- /* adapted version of __find_page_nolock:filemap.c */ int FASTCALL(find_comp_page(struct address_space *, unsigned long, struct comp_cache_fragment **)); ! int find_comp_page(struct address_space * mapping, unsigned long offset, struct comp_cache_fragment ** fragment) { struct comp_cache_fragment * fhash; *************** *** 398,401 **** --- 402,413 ---- return err; } + + inline int + in_comp_cache(struct address_space * mapping, unsigned long offset) + { + struct comp_cache_fragment * fragment; + return !find_comp_page(mapping, offset, &fragment); + } + inline void Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.49 retrieving revision 1.50 diff -C2 -r1.49 -r1.50 *** main.c 27 Jun 2002 13:17:37 -0000 1.49 --- main.c 1 Jul 2002 17:37:29 -0000 1.50 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! 
* Time-stamp: <2002-06-27 09:31:15 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-07-01 13:12:44 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 41,48 **** extern unsigned long num_physpages; ! extern struct comp_cache_page * get_comp_cache_page(struct page *, unsigned short, struct comp_cache_fragment **, int, int, unsigned int); inline void ! compress_dirty_page(struct page * page, int (*writepage)(struct page *), unsigned int gfp_mask) { int write; --- 41,48 ---- extern unsigned long num_physpages; ! extern struct comp_cache_page * get_comp_cache_page(struct page *, unsigned short, struct comp_cache_fragment **, int, int, unsigned int, int); inline void ! compress_dirty_page(struct page * page, int (*writepage)(struct page *), unsigned int gfp_mask, int priority) { int write; *************** *** 54,59 **** write |= !PageSwapCache(page); #endif ! if (write) ! goto writepage; if (PageMappedCompCache(page)) --- 54,61 ---- write |= !PageSwapCache(page); #endif ! if (write) { ! writepage(page); ! return; ! } if (PageMappedCompCache(page)) *************** *** 63,76 **** BUG(); ! if (!compress_page(page, 1, gfp_mask)) ! UnlockPage(page); ! return; ! ! writepage: ! writepage(page); } inline int ! compress_clean_page(struct page * page, unsigned int gfp_mask) { /* that should not happen */ --- 65,73 ---- BUG(); ! compress_page(page, 1, gfp_mask, priority); } inline int ! compress_clean_page(struct page * page, unsigned int gfp_mask, int priority) { /* that should not happen */ *************** *** 85,93 **** return 1; #endif ! return compress_page(page, 0, gfp_mask); } int ! compress_page(struct page * page, int dirty, unsigned int gfp_mask) { struct comp_cache_page * comp_page; --- 82,90 ---- return 1; #endif ! return compress_page(page, 0, gfp_mask, priority); } int ! compress_page(struct page * page, int dirty, unsigned int gfp_mask, int priority) { struct comp_cache_page * comp_page; *************** *** 112,116 **** comp_size = compress(current_compressed_page = page, buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm, dirty); ! comp_page = get_comp_cache_page(page, comp_size, &fragment, dirty, 1, gfp_mask); /* if comp_page == NULL, get_comp_cache_page() gave up --- 109,113 ---- comp_size = compress(current_compressed_page = page, buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm, dirty); ! comp_page = get_comp_cache_page(page, comp_size, &fragment, dirty, 1, gfp_mask, priority); /* if comp_page == NULL, get_comp_cache_page() gave up *************** *** 185,190 **** } int ! comp_cache_try_to_release_page(struct page ** page, int gfp_mask) { struct comp_cache_fragment * fragment; --- 182,188 ---- } + #ifndef CONFIG_COMP_DEMAND_RESIZE int ! comp_cache_try_to_release_page(struct page ** page, int gfp_mask, int priority) { struct comp_cache_fragment * fragment; *************** *** 223,227 **** dirty = 0; ! comp_page = get_comp_cache_page(*page, comp_size, &fragment, dirty, 0, gfp_mask); if (!comp_page) --- 221,225 ---- dirty = 0; ! 
comp_page = get_comp_cache_page(*page, comp_size, &fragment, dirty, 0, gfp_mask, priority); if (!comp_page) *************** *** 259,262 **** --- 257,261 ---- return ret; } + #endif #endif Index: swapin.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v retrieving revision 1.42 retrieving revision 1.43 diff -C2 -r1.42 -r1.43 *** swapin.c 25 Jun 2002 14:34:08 -0000 1.42 --- swapin.c 1 Jul 2002 17:37:30 -0000 1.43 *************** *** 2,6 **** * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-06-22 15:19:52 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-07-01 11:28:19 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 87,91 **** int ! read_comp_cache(struct address_space *mapping, unsigned long offset, struct page * page, int access) { struct comp_cache_fragment * fragment; --- 87,91 ---- int ! read_comp_cache(struct address_space *mapping, unsigned long offset, struct page * page) { struct comp_cache_fragment * fragment; *************** *** 107,118 **** /* move the fragment to the back of the lru list */ ! if (access) { ! remove_fragment_from_lru_queue(fragment); ! add_fragment_to_lru_queue(fragment); ! } decompress_fragment(fragment, page); ! #if 0 PageClearCompCache(page); --- 107,119 ---- /* move the fragment to the back of the lru list */ ! remove_fragment_from_lru_queue(fragment); ! add_fragment_to_lru_queue(fragment); decompress_fragment(fragment, page); ! /* update fault in stats */ ! comp_cache_update_faultin_stats(fragment); ! ! #ifdef CONFIG_COMP_DEMAND_RESIZE PageClearCompCache(page); *************** *** 120,131 **** __set_page_dirty(page); comp_cache_free(fragment); ! #endif ! ! /* update fault in stats */ ! comp_cache_update_faultin_stats(fragment); ! UnlockPage(fragment->comp_page->page); ! UnlockPage(page); out: return err; --- 121,131 ---- __set_page_dirty(page); + UnlockPage(fragment->comp_page->page); comp_cache_free(fragment); ! #else UnlockPage(fragment->comp_page->page); ! #endif ! ! UnlockPage(page); out: return err; Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.53 retrieving revision 1.54 diff -C2 -r1.53 -r1.54 *** swapout.c 25 Jun 2002 14:34:08 -0000 1.53 --- swapout.c 1 Jul 2002 17:37:30 -0000 1.54 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-06-22 14:55:33 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-07-01 14:22:54 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 24,28 **** static int ! refill_swp_buffer(unsigned int gfp_mask, int num_pages, int priority) { struct list_head * swp_buffer_lh; --- 24,28 ---- static int ! refill_swp_buffer(unsigned int gfp_mask, int nrpages, int priority) { struct list_head * swp_buffer_lh; *************** *** 34,42 **** gfp_mask_buffer = gfp_mask & ~__GFP_IO; ! maxscan = max(NUM_SWP_BUFFERS/priority, (int) (num_pages * 1.5)); wait = 0; ! try_again: ! while(--maxscan >= 0 && (swp_buffer_lh = swp_used_buffer_head.prev) != &swp_used_buffer_head) { swp_buffer = list_entry(swp_buffer_lh, struct swp_buffer, list); buffer_page = swp_buffer->page; --- 34,42 ---- gfp_mask_buffer = gfp_mask & ~__GFP_IO; ! maxscan = max(NUM_SWP_BUFFERS/priority, (int) (nrpages * 1.5)); wait = 0; ! try_again: ! 
while(--maxscan >= 0 && nrpages && (swp_buffer_lh = swp_used_buffer_head.prev) != &swp_used_buffer_head) { swp_buffer = list_entry(swp_buffer_lh, struct swp_buffer, list); buffer_page = swp_buffer->page; *************** *** 73,90 **** * comp_cache_free() didn't free the fragment struct, * so let's do it now */ ! if (!CompFragmentIO(fragment)) { kmem_cache_free(fragment_cachep, (fragment)); goto out; } ! /* in the case it is waiting for merge in ! * comp_cache_free(), we can't free it */ ! if (!fragment->mapping) { ! CompFragmentClearIO(fragment); goto out; - } /* it's not swapped out, so let' free it */ - CompFragmentClearIO(fragment); comp_cache_free(fragment); --- 73,92 ---- * comp_cache_free() didn't free the fragment struct, * so let's do it now */ ! if (!CompFragmentTestandClearIO(fragment)) { kmem_cache_free(fragment_cachep, (fragment)); goto out; } ! /*** ! * In the case it is waiting for merge in ! * comp_cache_free(), we don't have to free it. To be ! * clearer, it has been freed, except its data ! * structure, what will be freed when merged in ! * comp_cache_free() ! */ ! if (!fragment->mapping) //!CompFragmentFreed(fragment)) goto out; /* it's not swapped out, so let' free it */ comp_cache_free(fragment); *************** *** 95,113 **** list_add_tail(swp_buffer_lh, &swp_free_buffer_head); ! UnlockPage(buffer_page); ! --num_pages; } /* couldn't free any swap buffer? so let's perform IO, if * gfp_mask allows, to free the page buffers */ ! if (!wait && num_pages > 0) { wait = 1; ! maxscan = max(NUM_SWP_BUFFERS >> 4, (int) (num_pages * 4)); gfp_mask_buffer = gfp_mask; ! run_task_queue(&tq_disk); goto try_again; } ! return (num_pages > 0?0:1); } --- 97,118 ---- list_add_tail(swp_buffer_lh, &swp_free_buffer_head); ! UnlockPage(buffer_page); ! --nrpages; } /* couldn't free any swap buffer? so let's perform IO, if * gfp_mask allows, to free the page buffers */ ! if (!wait && nrpages > 0) { wait = 1; ! maxscan = max(NUM_SWP_BUFFERS >> 4, (int) (nrpages * 4)); gfp_mask_buffer = gfp_mask; ! if (unlikely(current->need_resched)) { ! __set_current_state(TASK_RUNNING); ! schedule(); ! } goto try_again; } ! return (nrpages > 0?0:1); } *************** *** 205,209 **** /* writeout_fragments() - write out some pages in the lru order until * we have a comp_page where we can store the new page */ ! static void writeout_fragments(unsigned int gfp_mask, int priority) { int (*writepage)(struct page *); --- 210,214 ---- /* writeout_fragments() - write out some pages in the lru order until * we have a comp_page where we can store the new page */ ! static int writeout_fragments(unsigned int gfp_mask, int priority) { int (*writepage)(struct page *); *************** *** 215,220 **** swp_entry_t entry; - maxscan = num_fragments/priority; nrpages = SWAP_CLUSTER_MAX; while (!list_empty(&lru_queue) && maxscan--) { --- 220,225 ---- swp_entry_t entry; nrpages = SWAP_CLUSTER_MAX; + maxscan = max((int) (num_fragments/priority), (int) (nrpages * 1.5)); while (!list_empty(&lru_queue) && maxscan--) { *************** *** 233,239 **** /* page locked */ ! if (TryLockPage(page)) { continue; - } /* page which has/had buffer? */ --- 238,243 ---- /* page locked */ ! if (TryLockPage(page)) continue; /* page which has/had buffer? */ *************** *** 291,297 **** } ! swap_cache_page = 0; ! if (PageSwapCache(fragment)) { ! swap_cache_page = 1; entry.val = fragment->index; if (vswap_address(entry)) --- 295,299 ---- } ! 
if ((swap_cache_page = PageSwapCache(fragment))) { entry.val = fragment->index; if (vswap_address(entry)) *************** *** 354,357 **** --- 356,361 ---- break; } + + return (!nrpages); } *************** *** 382,386 **** * @gfp_mask: we need to know if we can perform IO */ struct comp_cache_page * ! get_comp_cache_page(struct page * page, unsigned short compressed_size, struct comp_cache_fragment ** fragment_out, int dirty, int alloc, unsigned int gfp_mask) { struct comp_cache_page * comp_page = NULL; --- 386,390 ---- * @gfp_mask: we need to know if we can perform IO */ struct comp_cache_page * ! get_comp_cache_page(struct page * page, unsigned short compressed_size, struct comp_cache_fragment ** fragment_out, int dirty, int alloc, unsigned int gfp_mask, int priority) { struct comp_cache_page * comp_page = NULL; *************** *** 389,393 **** struct page * new_page; unsigned short aux_comp_size; ! int maxscan, maxtry, priority = 6; if (!page) --- 393,397 ---- struct page * new_page; unsigned short aux_comp_size; ! int maxscan, maxtry; if (!page) *************** *** 398,408 **** BUG(); - maxtry = num_comp_pages >> 1; - page_cache_get(page); ! while (maxtry-- && priority) { aux_comp_size = compressed_size; ! maxscan = num_comp_pages >> 3; while (maxscan--) { --- 402,411 ---- BUG(); page_cache_get(page); + maxtry = 3; ! while (maxtry--) { aux_comp_size = compressed_size; ! maxscan = 3; while (maxscan--) { *************** *** 456,485 **** UnlockPage(page); ! /*** ! * We didn't grow the compressed cache, thus it's time ! * to check if we able to free any fragment which was ! * waiting for IO completion. If we can't free any ! * fragment, it's time to write out some fragments. ! */ ! if (!refill_swp_buffer(gfp_mask, 1, priority)) ! writeout_fragments(gfp_mask, priority--); ! if (TryLockPage(page)) { ! if (!dirty) ! goto out_release; ! lock_page(page); ! goto set_bits_back; } ! if (page_count(page) - !!page->buffers != 3) ! break; } if (!dirty) goto out_release; - - set_bits_back: __set_page_dirty(page); ! ClearPageLaunder(page); out_release: page_cache_release(page); --- 459,485 ---- UnlockPage(page); ! if (!writeout_fragments(gfp_mask, priority)) ! goto give_up; ! if (unlikely(current->need_resched)) { ! __set_current_state(TASK_RUNNING); ! schedule(); } ! if (TryLockPage(page)) ! goto give_up; ! ! if (page_count(page) - !!page->buffers == 3) ! continue; ! ! break; } + UnlockPage(page); + give_up: if (!dirty) goto out_release; __set_page_dirty(page); ! ClearPageLaunder(page); out_release: page_cache_release(page); *************** *** 493,496 **** --- 493,498 ---- BUG(); } + if (!comp_page && PageLocked(page)) + BUG(); return comp_page; *************** *** 532,546 **** fragment->flags = 0; fragment->comp_page = comp_page; - - #if 0 - { - struct comp_cache_fragment * fout; - - if (!find_comp_page(page->mapping, page->index, &fout)) { - printk("found %p (index %08lx dirty %d sc %d) new %p (index %08lx dirty %d sc %d)\n", fout, fout->index, CompFragmentDirty(fout)?1:0, PageSwapCache(fragment)?1:0, fragment, fragment->index, dirty?1:0, PageSwapCache(fragment)?1:0); - BUG(); - } - } - #endif /* let's update some important fields */ --- 534,537 ---- *************** *** 629,636 **** UnlockPage(comp_page->page); comp_page = NULL; ! ! if (!dirty) ! goto out_release; ! goto set_bits_back; } --- 620,625 ---- UnlockPage(comp_page->page); comp_page = NULL; ! UnlockPage(page); ! goto give_up; } |
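[Editor's note] The priority argument threaded through compress_page(), get_comp_cache_page() and writeout_fragments() above mainly bounds how much of the fragment LRU is scanned per call. A standalone sketch of that budget calculation, using the constants visible in the diff (userspace model, not kernel code; names are invented):

#include <stdio.h>

#define MODEL_SWAP_CLUSTER_MAX 32   /* batch of pages to try to free */

/* Lower priority numbers mean more memory pressure, so a larger share of
 * the fragment LRU may be scanned; never less than 1.5x the batch size. */
static int writeout_scan_budget(int num_fragments, int priority)
{
    int floor = (int)(MODEL_SWAP_CLUSTER_MAX * 1.5);
    int by_priority = num_fragments / priority;
    return by_priority > floor ? by_priority : floor;
}

int main(void)
{
    printf("priority 6: scan up to %d fragments\n", writeout_scan_budget(1024, 6));
    printf("priority 1: scan up to %d fragments\n", writeout_scan_budget(1024, 1));
    return 0;
}

The same idea also caps get_comp_cache_page() in the diff, which now gives up after three attempts instead of retrying up to num_comp_pages/2 times.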
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 17:37:32
|
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv26297/mm Modified Files: filemap.c memory.c swap_state.c vmscan.c Log Message: Features o Some compressed cache functions (in particular the swap out ones) have priority parameter now, which will passed from VM main functions. The priority will show how far we should go on scanning lists to free spaces in compressed cache. o Fragments will not be decompressed when they are read from a read ahead (for swap and page cache). In this case, we only check if the fragment is in compressed cache. This police avoids LRU order to be changed. o Fragments will be removed from compressed cache if resize on demand is enabled. o Support for pages with buffers will only happen if resize on demand is disabled. Bug fixes o Fixed potential bug in __find_lock_page() where we would decompress a fragment and add a new page to page cache even if it had been just swapped in. Cleanups o Added #ifdefs in lru queues functions. o Several small cleanups Other o compress_page() only returns the page in locked state if it has been compressed. Otherwise, it will returned always unlocked. o now get_comp_cache_page() doesn't try so hard to get an entry. The maximum number of tries (which includes calls to writeout_fragments()) is 3. Index: filemap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v retrieving revision 1.30 retrieving revision 1.31 diff -C2 -r1.30 -r1.31 *** filemap.c 19 Jun 2002 12:18:43 -0000 1.30 --- filemap.c 1 Jul 2002 17:37:29 -0000 1.31 *************** *** 749,754 **** * and schedules an I/O to read in its contents from disk. */ ! static int FASTCALL(page_cache_read(struct file * file, unsigned long offset, int access)); ! static int page_cache_read(struct file * file, unsigned long offset, int access) { struct address_space *mapping = file->f_dentry->d_inode->i_mapping; --- 749,757 ---- * and schedules an I/O to read in its contents from disk. */ ! #define page_cache_read(file, offset) __page_cache_read(file, offset, 0) ! #define page_cache_readahead(file, offset) __page_cache_read(file, offset, 1) ! ! static int FASTCALL(__page_cache_read(struct file * file, unsigned long offset, int readahead)); ! static int __page_cache_read(struct file * file, unsigned long offset, int readahead) { struct address_space *mapping = file->f_dentry->d_inode->i_mapping; *************** *** 766,773 **** return -ENOMEM; if (!add_to_page_cache_unique(page, mapping, offset, hash)) { int error = 0; #ifdef CONFIG_COMP_PAGE_CACHE ! if (read_comp_cache(mapping, offset, page, access)) #endif error = mapping->a_ops->readpage(file, page); --- 769,784 ---- return -ENOMEM; + if (readahead) { + struct page * tmp_page = __find_page_nolock(mapping, offset, *hash); + if (!tmp_page && in_comp_cache(mapping, offset)) { + page_cache_release(page); + return 0; + } + } + if (!add_to_page_cache_unique(page, mapping, offset, hash)) { int error = 0; #ifdef CONFIG_COMP_PAGE_CACHE ! if (readahead || read_comp_cache(mapping, offset, page)) #endif error = mapping->a_ops->readpage(file, page); *************** *** 796,800 **** offset = CLUSTER_OFFSET(offset); while ((pages-- > 0) && (offset < filesize)) { ! int error = page_cache_read(file, offset, 0); if (error < 0) return error; --- 807,811 ---- offset = CLUSTER_OFFSET(offset); while ((pages-- > 0) && (offset < filesize)) { ! 
int error = page_cache_readahead(file, offset); if (error < 0) return error; *************** *** 1029,1037 **** page = __find_lock_page_helper(mapping, offset, *hash); #ifdef CONFIG_COMP_PAGE_CACHE ! if (!page) { if (!cached_page) { - struct comp_cache_fragment * fragment; - if (find_comp_page(mapping, offset, &fragment)) - goto out; spin_unlock(&pagecache_lock); cached_page = page_cache_alloc(mapping); --- 1040,1045 ---- page = __find_lock_page_helper(mapping, offset, *hash); #ifdef CONFIG_COMP_PAGE_CACHE ! if (!page && in_comp_cache(mapping, offset)) { if (!cached_page) { spin_unlock(&pagecache_lock); cached_page = page_cache_alloc(mapping); *************** *** 1039,1055 **** } ! if (TryLockPage(cached_page)) ! BUG(); ! ! if (read_comp_cache(mapping, offset, cached_page, 0)) { UnlockPage(cached_page); goto out; } page = cached_page; cached_page = NULL; - - add_to_page_cache(page, mapping, offset); - SetPageUptodate(page); } out: --- 1047,1066 ---- } ! if (add_to_page_cache_unique(cached_page, mapping, offset, hash)) ! goto out; ! ! if (read_comp_cache(mapping, offset, cached_page)) { ! __lru_cache_del(cached_page); ! __remove_inode_page(cached_page); UnlockPage(cached_page); + page_cache_release(cached_page); goto out; } + if (TryLockPage(cached_page)) + BUG(); + page = cached_page; cached_page = NULL; } out: *************** *** 1087,1094 **** lru_cache_add(page); #ifdef CONFIG_COMP_PAGE_CACHE ! if (!read_comp_cache(mapping, index, page, 0)) { ! if (TryLockPage(page)) BUG(); - } #endif } --- 1098,1103 ---- lru_cache_add(page); #ifdef CONFIG_COMP_PAGE_CACHE ! if (!read_comp_cache(mapping, index, page) && TryLockPage(page)) BUG(); #endif } *************** *** 1365,1369 **** if ((raend + ahead) >= end_index) break; ! if (page_cache_read(filp, raend + ahead, 0) < 0) break; } --- 1374,1378 ---- if ((raend + ahead) >= end_index) break; ! if (page_cache_readahead(filp, raend + ahead) < 0) break; } *************** *** 1629,1633 **** #ifdef CONFIG_COMP_PAGE_CACHE ! if (!read_comp_cache(mapping, index, page, 1)) goto page_ok; #endif --- 1638,1642 ---- #ifdef CONFIG_COMP_PAGE_CACHE ! if (!read_comp_cache(mapping, index, page)) goto page_ok; #endif *************** *** 1936,1940 **** while (nr) { ! page_cache_read(file, index, 0); index++; nr--; --- 1945,1949 ---- while (nr) { ! page_cache_readahead(file, index); index++; nr--; *************** *** 2026,2030 **** struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused) { ! int error, in_comp_cache; struct file *file = area->vm_file; struct address_space *mapping = file->f_dentry->d_inode->i_mapping; --- 2035,2039 ---- struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused) { ! int error; struct file *file = area->vm_file; struct address_space *mapping = file->f_dentry->d_inode->i_mapping; *************** *** 2089,2103 **** * so we need to map a zero page. */ ! in_comp_cache = 0; ! { ! struct comp_cache_fragment * fragment; ! if (!find_comp_page(mapping, pgoff, &fragment)) ! in_comp_cache = 1; ! } ! ! if ((pgoff < size) && !VM_RandomReadHint(area) && !in_comp_cache) error = read_cluster_nonblocking(file, pgoff, size); else ! error = page_cache_read(file, pgoff, 1); /* --- 2098,2105 ---- * so we need to map a zero page. */ ! if ((pgoff < size) && !VM_RandomReadHint(area) && !in_comp_cache(mapping, pgoff)) error = read_cluster_nonblocking(file, pgoff, size); else ! 
error = page_cache_read(file, pgoff); /* *************** *** 2589,2593 **** } else { while ((start < end) && (start < size)) { ! error = page_cache_read(file, start, 0); start++; if (error < 0) --- 2591,2595 ---- } else { while ((start < end) && (start < size)) { ! error = page_cache_read(file, start); start++; if (error < 0) *************** *** 2932,2936 **** #ifdef CONFIG_COMP_PAGE_CACHE ! if (read_comp_cache(mapping, index, page, 1)) #endif err = filler(data, page); --- 2934,2938 ---- #ifdef CONFIG_COMP_PAGE_CACHE ! if (read_comp_cache(mapping, index, page)) #endif err = filler(data, page); Index: memory.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v retrieving revision 1.31 retrieving revision 1.32 diff -C2 -r1.31 -r1.32 *** memory.c 19 Jun 2002 12:18:43 -0000 1.31 --- memory.c 1 Jul 2002 17:37:29 -0000 1.32 *************** *** 1109,1113 **** for (i = 0; i < num; offset++, i++) { /* Ok, do the async read-ahead now */ ! new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset)); if (!new_page) break; --- 1109,1113 ---- for (i = 0; i < num; offset++, i++) { /* Ok, do the async read-ahead now */ ! new_page = read_swap_cache_async_ahead(SWP_ENTRY(SWP_TYPE(entry), offset)); if (!new_page) break; Index: swap_state.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v retrieving revision 1.31 retrieving revision 1.32 diff -C2 -r1.31 -r1.32 *** swap_state.c 25 Jun 2002 14:34:07 -0000 1.31 --- swap_state.c 1 Jul 2002 17:37:29 -0000 1.32 *************** *** 192,196 **** * the swap entry is no longer in use. */ ! struct page * read_swap_cache_async(swp_entry_t entry) { struct page *found_page, *new_page = NULL; --- 192,196 ---- * the swap entry is no longer in use. */ ! struct page * __read_swap_cache_async(swp_entry_t entry, int readahead) { struct page *found_page, *new_page = NULL; *************** *** 220,223 **** --- 220,231 ---- } + if (readahead) { + struct page * tmp_page = find_page_nolock(&swapper_space, entry.val); + if (tmp_page) + break; + if (in_comp_cache(mapping, offset)) + return new_page; + } + /* * Associate the page with swap entry in the swap cache. *************** *** 231,239 **** err = add_to_swap_cache(new_page, entry); if (!err) { ! if (!read_comp_cache(&swapper_space, entry.val, new_page, 1)) ! return new_page; if (vswap_address(entry)) BUG(); - rw_swap_page(READ, new_page); return new_page; } --- 239,246 ---- err = add_to_swap_cache(new_page, entry); if (!err) { ! if (readahead || read_comp_cache(&swapper_space, entry.val, new_page)) ! rw_swap_page(READ, new_page); if (vswap_address(entry)) BUG(); return new_page; } Index: vmscan.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v retrieving revision 1.36 retrieving revision 1.37 diff -C2 -r1.36 -r1.37 *** vmscan.c 25 Jun 2002 14:34:07 -0000 1.36 --- vmscan.c 1 Jul 2002 17:37:29 -0000 1.37 *************** *** 421,425 **** spin_unlock(&pagemap_lru_lock); ! compress_dirty_page(page, writepage, gfp_mask); page_cache_release(page); --- 421,425 ---- spin_unlock(&pagemap_lru_lock); ! 
compress_dirty_page(page, writepage, gfp_mask, priority); page_cache_release(page); *************** *** 427,436 **** spin_lock(&pagemap_lru_lock); - /*** - * if we could compress, it means that - * the page has neither been mapped - * back to any process nor freed, so - * we can go on freeing it here - */ if (!PageCompCache(page)) continue; --- 427,430 ---- *************** *** 449,453 **** page_cache_get(page); ! if (comp_cache_try_to_release_page(&page, gfp_mask)) { if (!page->mapping) { /* --- 443,447 ---- page_cache_get(page); ! if (comp_cache_try_to_release_page(&page, gfp_mask, priority)) { if (!page->mapping) { /* *************** *** 524,539 **** */ if (!PageCompCache(page)) { page_cache_get(page); spin_unlock(&pagemap_lru_lock); ! if (!compress_clean_page(page, gfp_mask)) { ! UnlockPage(page); ! page_cache_release(page); ! spin_lock(&pagemap_lru_lock); continue; - } - - page_cache_release(page); - spin_lock(&pagemap_lru_lock); } #endif --- 518,533 ---- */ if (!PageCompCache(page)) { + int compressed; + page_cache_get(page); spin_unlock(&pagemap_lru_lock); + + compressed = compress_clean_page(page, gfp_mask, priority); + + page_cache_release(page); + spin_lock(&pagemap_lru_lock); ! if (!compressed) continue; } #endif |
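[Editor's note] The filemap.c and swap_state.c changes above implement the readahead policy from the log message: a readahead never decompresses a fragment (that would promote it in the compressed cache's LRU order); it only probes the cache with in_comp_cache() and drops the read on a hit. A simplified standalone model of that decision (not kernel code; names invented):

#include <stdbool.h>
#include <stdio.h>

enum ra_action { RA_SKIP, RA_READ_FROM_DISK };

static enum ra_action readahead_decision(bool already_cached, bool in_comp_cache)
{
    if (already_cached || in_comp_cache)
        return RA_SKIP;              /* leave the fragment's LRU position alone */
    return RA_READ_FROM_DISK;
}

int main(void)
{
    printf("%d\n", readahead_decision(false, true));   /* RA_SKIP */
    printf("%d\n", readahead_decision(false, false));  /* RA_READ_FROM_DISK */
    return 0;
}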
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 17:37:32
|
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv26297/include/linux Modified Files: comp_cache.h swap.h Log Message: Features o Some compressed cache functions (in particular the swap out ones) have priority parameter now, which will passed from VM main functions. The priority will show how far we should go on scanning lists to free spaces in compressed cache. o Fragments will not be decompressed when they are read from a read ahead (for swap and page cache). In this case, we only check if the fragment is in compressed cache. This police avoids LRU order to be changed. o Fragments will be removed from compressed cache if resize on demand is enabled. o Support for pages with buffers will only happen if resize on demand is disabled. Bug fixes o Fixed potential bug in __find_lock_page() where we would decompress a fragment and add a new page to page cache even if it had been just swapped in. Cleanups o Added #ifdefs in lru queues functions. o Several small cleanups Other o compress_page() only returns the page in locked state if it has been compressed. Otherwise, it will returned always unlocked. o now get_comp_cache_page() doesn't try so hard to get an entry. The maximum number of tries (which includes calls to writeout_fragments()) is 3. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.84 retrieving revision 1.85 diff -C2 -r1.84 -r1.85 *** comp_cache.h 27 Jun 2002 13:17:37 -0000 1.84 --- comp_cache.h 1 Jul 2002 17:37:28 -0000 1.85 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-06-27 09:33:08 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-07-01 13:09:35 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 314,318 **** extern int FASTCALL(flush_comp_cache(struct page *)); ! int read_comp_cache(struct address_space *, unsigned long, struct page *, int); int invalidate_comp_cache(struct address_space *, unsigned long); void invalidate_comp_pages(struct address_space *); --- 314,318 ---- extern int FASTCALL(flush_comp_cache(struct page *)); ! int read_comp_cache(struct address_space *, unsigned long, struct page *); int invalidate_comp_cache(struct address_space *, unsigned long); void invalidate_comp_pages(struct address_space *); *************** *** 323,327 **** #define there_are_locked_comp_pages(mapping) (!list_empty(&(mapping)->locked_comp_pages)) #else ! static inline int read_comp_cache(struct address_space * mapping, unsigned long offset, struct page * page, int access) { return -ENOENT; } static inline int invalidate_comp_cache(struct address_space * mapping, unsigned long offset) { return -ENOENT; } static inline int flush_comp_cache(struct page * page) { return -ENOENT; } --- 323,327 ---- #define there_are_locked_comp_pages(mapping) (!list_empty(&(mapping)->locked_comp_pages)) #else ! static inline int read_comp_cache(struct address_space * mapping, unsigned long offset, struct page * page) { return -ENOENT; } static inline int invalidate_comp_cache(struct address_space * mapping, unsigned long offset) { return -ENOENT; } static inline int flush_comp_cache(struct page * page) { return -ENOENT; } *************** *** 336,344 **** /* main.c */ #ifdef CONFIG_COMP_CACHE ! 
int compress_page(struct page *, int, unsigned int); void comp_cache_init(void); inline int init_comp_page(struct comp_cache_page **,struct page *); ! inline void compress_dirty_page(struct page *, int (*writepage)(struct page *), unsigned int); ! inline int compress_clean_page(struct page *, unsigned int); extern unsigned long comp_cache_free_space; --- 336,344 ---- /* main.c */ #ifdef CONFIG_COMP_CACHE ! int compress_page(struct page *, int, unsigned int, int); void comp_cache_init(void); inline int init_comp_page(struct comp_cache_page **,struct page *); ! inline void compress_dirty_page(struct page *, int (*writepage)(struct page *), unsigned int, int); ! inline int compress_clean_page(struct page *, unsigned int, int); extern unsigned long comp_cache_free_space; *************** *** 346,358 **** #else static inline void comp_cache_init(void) {}; ! static inline int compress_dirty_page(struct page * page, int (*writepage)(struct page *), unsigned int gfp_mask) { return writepage(page); } ! static inline int compress_clean_page(struct page * page, unsigned int gfp_mask) { return 1; } #endif #ifdef CONFIG_COMP_PAGE_CACHE - int comp_cache_try_to_release_page(struct page **, int); void steal_page_from_comp_cache(struct page *, struct page *); #else ! static inline int comp_cache_try_to_release_page(struct page ** page, int gfp_mask) { return try_to_release_page(*page, gfp_mask); } static inline void steal_page_from_comp_cache(struct page * page, struct page * new_page) {}; #endif --- 346,362 ---- #else static inline void comp_cache_init(void) {}; ! static inline int compress_dirty_page(struct page * page, int (*writepage)(struct page *), unsigned int gfp_mask, int priority) { return writepage(page); } ! static inline int compress_clean_page(struct page * page, unsigned int gfp_mask, int priority) { return 1; } #endif #ifdef CONFIG_COMP_PAGE_CACHE void steal_page_from_comp_cache(struct page *, struct page *); + #ifndef CONFIG_COMP_DEMAND_RESIZE + int comp_cache_try_to_release_page(struct page **, int, int); #else ! static inline int comp_cache_try_to_release_page(struct page ** page, int gfp_mask, int priority) { return try_to_release_page(*page, gfp_mask); } ! #endif ! #else ! static inline int comp_cache_try_to_release_page(struct page ** page, int gfp_mask, int priority) { return try_to_release_page(*page, gfp_mask); } static inline void steal_page_from_comp_cache(struct page * page, struct page * new_page) {}; #endif *************** *** 546,552 **** --- 550,558 ---- /* enough memory functions */ #ifdef CONFIG_COMP_CACHE + inline int in_comp_cache(struct address_space *, unsigned long); extern int FASTCALL(find_comp_page(struct address_space *, unsigned long, struct comp_cache_fragment **)); #else static inline int find_comp_page(struct address_space * mapping, unsigned long offset, struct comp_cache_fragment ** fragment) { return -ENOENT; } + static inline int in_comp_cache(struct address_space * mapping, unsigned long offset) { return 0; } #endif Index: swap.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/swap.h,v retrieving revision 1.14 retrieving revision 1.15 diff -C2 -r1.14 -r1.15 *** swap.h 27 Feb 2002 14:16:52 -0000 1.14 --- swap.h 1 Jul 2002 17:37:29 -0000 1.15 *************** *** 140,144 **** extern void free_page_and_swap_cache(struct page *page); extern struct page * lookup_swap_cache(swp_entry_t); ! 
extern struct page * read_swap_cache_async(swp_entry_t); /* linux/mm/oom_kill.c */ --- 140,146 ---- extern void free_page_and_swap_cache(struct page *page); extern struct page * lookup_swap_cache(swp_entry_t); ! extern struct page * __read_swap_cache_async(swp_entry_t, int); ! #define read_swap_cache_async(entry) __read_swap_cache_async(entry, 0) ! #define read_swap_cache_async_ahead(entry) __read_swap_cache_async(entry, 1) /* linux/mm/oom_kill.c */ |
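[Editor's note] The swap.h hunk above keeps the old read_swap_cache_async() name as a macro over the new __read_swap_cache_async(entry, readahead) worker, plus a *_ahead variant, so existing callers stay unchanged. The same wrapper pattern in a self-contained form (illustrative only; the names below are invented):

#include <stdio.h>

/* One worker takes an explicit readahead flag... */
static int model_read_page(unsigned long entry, int readahead)
{
    printf("entry %lu: %s\n", entry, readahead ? "readahead" : "demand read");
    return 0;
}

/* ...and two macros give the call sites self-documenting names. */
#define model_read_page_async(entry)        model_read_page(entry, 0)
#define model_read_page_async_ahead(entry)  model_read_page(entry, 1)

int main(void)
{
    model_read_page_async(10UL);
    model_read_page_async_ahead(11UL);
    return 0;
}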
From: Rodrigo S. de C. <rc...@us...> - 2002-06-27 13:17:42
|
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv1474/include/linux Modified Files: comp_cache.h Log Message: Bug fix: o Fixed potential bug when initializing a new comp page. It wasn't checked if the slab cache allocation had failed, so it could oops due to a null pointer. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.83 retrieving revision 1.84 diff -C2 -r1.83 -r1.84 *** comp_cache.h 25 Jun 2002 14:34:07 -0000 1.83 --- comp_cache.h 27 Jun 2002 13:17:37 -0000 1.84 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-06-23 12:35:16 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-06-27 09:33:08 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 338,342 **** int compress_page(struct page *, int, unsigned int); void comp_cache_init(void); ! inline void init_comp_page(struct comp_cache_page **,struct page *); inline void compress_dirty_page(struct page *, int (*writepage)(struct page *), unsigned int); inline int compress_clean_page(struct page *, unsigned int); --- 338,342 ---- int compress_page(struct page *, int, unsigned int); void comp_cache_init(void); ! inline int init_comp_page(struct comp_cache_page **,struct page *); inline void compress_dirty_page(struct page *, int (*writepage)(struct page *), unsigned int); inline int compress_clean_page(struct page *, unsigned int); |
From: Rodrigo S. de C. <rc...@us...> - 2002-06-27 13:17:41
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv1474/mm/comp_cache Modified Files: adaptivity.c main.c Log Message: Bug fix: o Fixed potential bug when initializing a new comp page. It wasn't checked if the slab cache allocation had failed, so it could oops due to a null pointer. Index: adaptivity.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v retrieving revision 1.29 retrieving revision 1.30 diff -C2 -r1.29 -r1.30 *** adaptivity.c 25 Jun 2002 14:34:07 -0000 1.29 --- adaptivity.c 27 Jun 2002 13:17:37 -0000 1.30 *************** *** 2,6 **** * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-06-25 10:32:29 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-06-27 09:32:53 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 634,638 **** return 0; ! init_comp_page(&comp_page, page); comp_cache_freeable_space += PAGE_SIZE; --- 634,641 ---- return 0; ! if (!init_comp_page(&comp_page, page)) { ! page_cache_release(page); ! return 0; ! } comp_cache_freeable_space += PAGE_SIZE; Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.48 retrieving revision 1.49 diff -C2 -r1.48 -r1.49 *** main.c 25 Jun 2002 14:34:07 -0000 1.48 --- main.c 27 Jun 2002 13:17:37 -0000 1.49 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-06-24 18:14:59 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-06-27 09:31:15 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 268,273 **** LIST_HEAD(lru_queue); ! inline void init_comp_page(struct comp_cache_page ** comp_page,struct page * page) { ! *comp_page = alloc_comp_cache(); (*comp_page)->free_space = PAGE_SIZE; (*comp_page)->free_offset = 0; --- 268,278 ---- LIST_HEAD(lru_queue); ! inline int ! init_comp_page(struct comp_cache_page ** comp_page,struct page * page) { ! *comp_page = alloc_comp_cache(); ! ! if (!(*comp_page)) ! return 0; ! (*comp_page)->free_space = PAGE_SIZE; (*comp_page)->free_offset = 0; *************** *** 275,278 **** --- 280,284 ---- INIT_LIST_HEAD(&((*comp_page)->fragments)); add_comp_page_to_hash_table((*comp_page)); + return 1; } *************** *** 320,324 **** for (i = 0; i < num_comp_pages; i++) { page = alloc_page(GFP_KERNEL); ! init_comp_page(&comp_page, page); } comp_cache_free_space = num_comp_pages * PAGE_SIZE; --- 326,332 ---- for (i = 0; i < num_comp_pages; i++) { page = alloc_page(GFP_KERNEL); ! ! if (!init_comp_page(&comp_page, page)) ! page_cache_release(page); } comp_cache_free_space = num_comp_pages * PAGE_SIZE; |
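[Editor's note] The fix above makes init_comp_page() report whether the slab allocation of the comp_cache_page descriptor succeeded, so both callers can release the page they just allocated instead of dereferencing a NULL pointer. A simplified userspace model of the corrected contract (not the project's code; malloc stands in for the slab cache):

#include <stdio.h>
#include <stdlib.h>

struct model_comp_page { unsigned int free_space; };

static int model_init_comp_page(struct model_comp_page **comp_page)
{
    *comp_page = malloc(sizeof(**comp_page));   /* stands in for alloc_comp_cache() */
    if (!*comp_page)
        return 0;                               /* allocation failed: tell the caller */
    (*comp_page)->free_space = 4096;
    return 1;
}

int main(void)
{
    struct model_comp_page *cp;
    if (!model_init_comp_page(&cp)) {
        fprintf(stderr, "allocation failed; the caller would release its page here\n");
        return 1;
    }
    printf("free space: %u\n", cp->free_space);
    free(cp);
    return 0;
}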