[lc-checkins] CVS: linux/mm filemap.c,1.40,1.41 page_alloc.c,1.24,1.25 swap_state.c,1.39,1.40 swapfile.c,1.35,1.36 vmscan.c,1.43,1.44
From: Rodrigo S. de C. <rc...@us...> - 2002-11-22 16:02:09
Update of /cvsroot/linuxcompressed/linux/mm
In directory sc8-pr-cvs1:/tmp/cvs-serv13256/mm

Modified Files:
	filemap.c page_alloc.c swap_state.c swapfile.c vmscan.c
Log Message:

Features

o New clean page adaptability. This policy disables compression of
  clean pages when it is not worth it (i.e., most pages are compressed
  and freed without being reclaimed by the system).
o Two new configuration options to disable the whole adaptability
  policy and the clean page adaptability separately. They were mostly
  used for some tests, but they may be useful for someone whose
  compressed caching is not performing very well.

Bug Fixes

o Make the LZO code compile on Athlon systems.
o __read_comp_cache(): if a dirty fragment was supposed to be freed,
  it wouldn't actually be freed, because we forgot to drop a reference
  on the fragment.

Cleanups

o Lots, mainly in adaptivity.c
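The hooks touched below give the shape of the clean page adaptability policy: while clean_page_compress_lock is set, vmscan.c only records clean pages via add_clean_page() instead of compressing them, and the readpage paths call hit_clean_page() when such a page has to be read back from disk. The actual bookkeeping lives in adaptivity.c, which is not part of this commit, so the following is only a standalone sketch of how a policy like this could be wired up; the tracking table, the key type and the 10% threshold are illustrative assumptions, not the real implementation.

/*
 * Toy model of the clean page adaptability policy.  Only the names
 * clean_page_compress_lock, add_clean_page() and hit_clean_page() come
 * from the hooks visible in this commit; everything else is assumed.
 */
#include <stdio.h>

#define TRACKED_MAX 1024

static int clean_page_compress_lock = 1;   /* 1: clean pages are NOT compressed (start locked for the demo) */
static unsigned long tracked[TRACKED_MAX]; /* clean pages seen while locked */
static unsigned int nr_tracked, nr_hits;

/* vmscan path: a clean page that would have been compressed is only recorded */
static void add_clean_page(unsigned long page)
{
	if (nr_tracked < TRACKED_MAX)
		tracked[nr_tracked++] = page;
}

/* readpage paths: the page had to be read back from disk; if it was recorded
 * earlier, compressing clean pages would have saved this read */
static void hit_clean_page(unsigned long page)
{
	unsigned int i;

	for (i = 0; i < nr_tracked; i++) {
		if (tracked[i] == page) {
			nr_hits++;
			break;
		}
	}

	/* Assumed rule: once 10% of the recorded clean pages have been read
	 * back, compressing clean pages pays off again, so drop the lock */
	if (nr_tracked >= 100 && nr_hits * 10 >= nr_tracked) {
		clean_page_compress_lock = 0;
		nr_tracked = nr_hits = 0;
	}
}

int main(void)
{
	unsigned long p;

	for (p = 0; p < 200; p++)
		add_clean_page(p);
	for (p = 0; p < 25; p++)
		hit_clean_page(p);

	printf("clean_page_compress_lock = %d\n", clean_page_compress_lock);
	return 0;
}

The idea is simply that a high read-back rate among the recorded clean pages is evidence that compressing them would have avoided disk reads, so the lock is dropped and compression of clean pages resumes.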
Index: filemap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v
retrieving revision 1.40
retrieving revision 1.41
diff -C2 -r1.40 -r1.41
*** filemap.c	10 Sep 2002 16:43:12 -0000	1.40
--- filemap.c	22 Nov 2002 16:01:34 -0000	1.41
***************
*** 776,780 ****
  		return error;
  	}
! 	}
  #endif
  	error = mapping->a_ops->readpage(file, page);
--- 776,782 ----
  		return error;
  	}
! 	}
! 	if (clean_page_compress_lock)
! 		hit_clean_page(page);
  #endif
  	error = mapping->a_ops->readpage(file, page);
***************
*** 1549,1552 ****
--- 1551,1558 ----
  readpage:
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ 	if (clean_page_compress_lock)
+ 		hit_clean_page(page);
+ #endif
  	/* ... and start the actual read. The read will unlock the page. */
  	error = mapping->a_ops->readpage(filp, page);
***************
*** 2103,2106 ****
--- 2109,2116 ----
  	}
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ 	if (clean_page_compress_lock)
+ 		hit_clean_page(page);
+ #endif
  	if (!mapping->a_ops->readpage(file, page)) {
  		wait_on_page(page);
***************
*** 2130,2133 ****
--- 2140,2147 ----
  	}
  	ClearPageError(page);
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ 	if (clean_page_compress_lock)
+ 		hit_clean_page(page);
+ #endif
  	if (!mapping->a_ops->readpage(file, page)) {
  		wait_on_page(page);

Index: page_alloc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/page_alloc.c,v
retrieving revision 1.24
retrieving revision 1.25
diff -C2 -r1.24 -r1.25
*** page_alloc.c	10 Sep 2002 16:43:14 -0000	1.24
--- page_alloc.c	22 Nov 2002 16:01:34 -0000	1.25
***************
*** 638,662 ****
  {
  	unsigned long mask;
! 	int j = ZONE_NORMAL;
! 	zone_t *zone = contig_page_data.node_zones + j;
! 	int real_num_comp_pages;
!
! 	/* the real number of memory pages used by compressed cache */
! 	real_num_comp_pages = comp_page_to_page(num_comp_pages);
! 	zone_num_comp_pages = real_num_comp_pages;
!
! 	if (real_num_comp_pages > zone->size)
! 		real_num_comp_pages = zone->size;

  	/* whoops: that should be zone->size minus zholes. Since
  	 * zholes is always 0 when calling free_area_init_core(), I
  	 * guess we don't have to worry about that now */
! 	mask = ((zone->size - real_num_comp_pages)/zone_balance_ratio[j]);

! 	if (mask < zone_balance_min[j])
! 		mask = zone_balance_min[j];
! 	else if (mask > zone_balance_max[j])
! 		mask = zone_balance_max[j];

  	zone->pages_min = mask;
--- 638,665 ----
  {
  	unsigned long mask;
! 	zone_t *zone;
! 	int num_memory_pages;
! 	/* We don't have to worry if we have so much memory that it
! 	 * will always be above the maximum value. As of 2.4.18, this
! 	 * happens when we have 256M, since it always have a
! 	 * (zone->size - num_memory_pages) greater than 128M */
! 	//if (num_physpages >= 2 * zone_balance_ratio[ZONE_NORMAL] * zone_balance_max[ZONE_NORMAL])
! 		//return;
+ 	/* the real number of memory pages used by compressed cache */
+ 	zone_num_comp_pages = num_memory_pages = comp_page_to_page(num_comp_pages);
+
+ 	zone = contig_page_data.node_zones + ZONE_NORMAL;
+
  	/* whoops: that should be zone->size minus zholes. Since
  	 * zholes is always 0 when calling free_area_init_core(), I
  	 * guess we don't have to worry about that now */
! 	mask = ((zone->size - num_memory_pages)/zone_balance_ratio[ZONE_NORMAL]);

! 	if (mask < zone_balance_min[ZONE_NORMAL])
! 		mask = zone_balance_min[ZONE_NORMAL];
! 	else if (mask > zone_balance_max[ZONE_NORMAL])
! 		mask = zone_balance_max[ZONE_NORMAL];

  	zone->pages_min = mask;
***************
*** 664,679 ****
  	zone->pages_high = mask*3;
  }
-
- void __init
- comp_cache_init_fix_watermarks(int num_comp_pages)
- {
- 	zone_t *zone = contig_page_data.node_zones + ZONE_NORMAL;
-
- 	printk("Compressed Cache: page watermarks (normal zone)\nCompressed Cache: (%lu, %lu, %lu) -> ",
- 	       zone->pages_min, zone->pages_low, zone->pages_high);
- 	comp_cache_fix_watermarks(num_comp_pages);
- 	printk("(%lu, %lu, %lu)\n", zone->pages_min, zone->pages_low, zone->pages_high);
- }
-
  #endif
--- 667,670 ----

Index: swap_state.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v
retrieving revision 1.39
retrieving revision 1.40
diff -C2 -r1.39 -r1.40
*** swap_state.c	10 Sep 2002 16:43:16 -0000	1.39
--- swap_state.c	22 Nov 2002 16:01:35 -0000	1.40
***************
*** 244,247 ****
--- 244,251 ----
  		if (get_swap_compressed(entry))
  			PageSetCompressed(new_page);
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ 		if (clean_page_compress_lock)
+ 			hit_clean_page(new_page);
+ #endif
  		rw_swap_page(READ, new_page);
  		return new_page;

Index: swapfile.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/swapfile.c,v
retrieving revision 1.35
retrieving revision 1.36
diff -C2 -r1.35 -r1.36
*** swapfile.c	10 Sep 2002 16:43:17 -0000	1.35
--- swapfile.c	22 Nov 2002 16:01:35 -0000	1.36
***************
*** 24,27 ****
--- 24,30 ----
  int total_swap_pages;
  static int swap_overflow;
+ #ifdef CONFIG_COMP_SWAP
+ unsigned long max_comp_swap_pages = 0;
+ #endif

  static const char Bad_file[] = "Bad swap file entry ";
***************
*** 1347,1351 ****
  		goto bad_swap;
  	}
- 	error = 0;
  	memset(p->swap_map, 0, maxpages * sizeof(short));
--- 1350,1353 ----
***************
*** 1655,1657 ****
  	return ret;
  }
-
--- 1657,1658 ----

Index: vmscan.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v
retrieving revision 1.43
retrieving revision 1.44
diff -C2 -r1.43 -r1.44
*** vmscan.c	31 Jul 2002 12:31:05 -0000	1.43
--- vmscan.c	22 Nov 2002 16:01:36 -0000	1.44
***************
*** 520,523 ****
--- 520,533 ----
  		if (!PageCompCache(page)) {
  			int compressed;
+
+ #ifndef CONFIG_COMP_DIS_CLEAN
+ 			/* enable this #if 0 to enable policy that
+ 			 * stop STORING clean page in compressed
+ 			 * cache */
+ 			if (clean_page_compress_lock) {
+ 				add_clean_page(page);
+ 				goto check_freeable;
+ 			}
+ #endif

  			page_cache_get(page);
***************
*** 535,539 ****
  		}

! 		spin_lock(&pagecache_lock);
  		if (!is_page_cache_freeable(page)) {
  			spin_unlock(&pagecache_lock);
--- 545,550 ----
  		}

! 		spin_lock(&pagecache_lock);
! check_freeable:
  		if (!is_page_cache_freeable(page)) {
  			spin_unlock(&pagecache_lock);
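The page_alloc.c change recomputes the normal-zone watermarks with the compressed cache pages subtracted from the zone size, which also explains the commented-out shortcut: assuming the stock 2.4 zone_balance defaults (ratio 128, min 20, max 255), any (zone->size - num_memory_pages) above 128M divides down past 255 and is clamped to the maximum anyway. Below is a minimal standalone check of the formula under those assumed defaults and 4 KB pages; the zone sizes are rough figures, not taken from the commit.

/*
 * Standalone check of the watermark formula from the page_alloc.c hunk.
 * The 128/20/255 constants are the assumed stock 2.4 zone_balance values.
 */
#include <stdio.h>

#define ZONE_BALANCE_RATIO 128
#define ZONE_BALANCE_MIN    20
#define ZONE_BALANCE_MAX   255

static void fix_watermarks(unsigned long zone_size, unsigned long comp_pages)
{
	unsigned long mask = (zone_size - comp_pages) / ZONE_BALANCE_RATIO;

	if (mask < ZONE_BALANCE_MIN)
		mask = ZONE_BALANCE_MIN;
	else if (mask > ZONE_BALANCE_MAX)
		mask = ZONE_BALANCE_MAX;

	printf("zone %lu pages, comp cache %lu pages -> min %lu, low %lu, high %lu\n",
	       zone_size, comp_pages, mask, mask * 2, mask * 3);
}

int main(void)
{
	/* ~112 MB normal zone (128 MB machine), 10 MB compressed cache */
	fix_watermarks(28672, 2560);

	/* ~225 MB normal zone (256 MB machine), without and with the cache */
	fix_watermarks(57600, 0);
	fix_watermarks(57600, 2560);
	return 0;
}

On the 256 MB case both runs clamp to the 255-page maximum, so skipping the recalculation there, as the commented-out early return would do, leaves the watermarks unchanged.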