Thread: [lc-checkins] CVS: linux/mm/comp_cache adaptivity.c,1.16,1.17 aux.c,1.23,1.24 free.c,1.26,1.27 main.
From: Rodrigo S. de C. <rc...@us...> - 2002-04-28 20:51:38
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv2510/mm/comp_cache
Modified Files:
adaptivity.c aux.c free.c main.c proc.c swapin.c swapout.c
Log Message:
This version features a first, non-functional version of compressed
cache automatic adaptivity to system behaviour. It also has many
changes aiming to fix the performance drop we see in the Linux kernel
compilation test (check the statistics for 0.23pre1 on our web
site). Our analysis isn't complete and more changes are likely to
come, since a large percentage of CPU time is still going
unused. Anyway, the current changes improve the compressed cache a
lot, mainly its support for the page cache, and it already works much
better in that scenario.
Some detailed changes:
- Configuration option changes. The compressed cache option is now
available only if SMP is turned off. Page cache support is an option
that is disabled by default. There's also an option to enable
adaptivity, which is currently non-functional. (The gating pattern is
sketched after this list.)
- There's no longer an option in the kernel configuration to select
the initial compressed cache size. It can be selected only by kernel
parameter (a usage example follows the list). This parameter isn't
available when the adaptivity option is enabled (since the system
will configure the compressed cache automatically). In that case, the
initial compressed cache size is 10% of total memory.
- Function cleanup: all compression algorithm functions and related
code are now in the proc.c file; the statistics functions were
rewritten and are simpler.
- New statistics are collected by the system, such as a per-cache
breakdown (swap and page cache). The statistics are much more
complete and nicer.
- There are now functions that force the VM to skip writing dirty
buffers and shrinking the slab cache, dcache and icache, since we
want the system to put much more pressure on pages from the page and
swap caches in order to have these kinds of pages compressed (the
skip mechanism is sketched after this list).
- Pages are removed from the compressed cache on swapin if the
process has write permission (see the sketch after this list). Since
the pte will be set dirty, the page will surely be compressed again,
so why keep it in the compressed cache?
- If we are swapping in and the page is not present in the swap
cache, we no longer read a cluster of pages from the swap device when
the page is in the compressed cache. This conceptual bug forced us to
read many pages from the swap device even though the page was
compressed in our cache, which is wrong. The same thing happened when
a file entry was faulted in and we serviced the fault: we were
forcing a cluster read even if the page was present in the compressed
cache. (The fixed path is sketched after this list.)
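
A few sketches to make the items above concrete. First, the
configuration gating: page cache support is compiled in
conditionally. A minimal sketch of the pattern, reusing the
CONFIG_COMP_PAGE_CACHE symbol and the shmem_page()/PageSwapCache()
tests from the main.c hunk below (the wrapping function itself is
illustrative, not code from the tree):

	/* Illustrative only: with page cache support enabled, any
	 * non-shmem page may be compressed; without it, the
	 * compressed cache is restricted to swap cache pages. */
	static int page_is_compressible(struct page * page)
	{
	#ifdef CONFIG_COMP_PAGE_CACHE
		return !shmem_page(page);
	#else
		return PageSwapCache(page);
	#endif
	}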
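
Next, the size parameter: as the main.c hunk below shows, the initial
size is parsed by a __setup() handler, so it is given on the kernel
command line. For example (the value here is arbitrary):

	linux root=/dev/hda1 compsize=4096

This requests an initial cache of 4096 pages; the init code falls
back to 10% of total memory if the value is zero or larger than
max_num_comp_pages (half of num_physpages).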
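
The VM skip functions are plain modulo counters, one per subsystem
(buffers, slab, dcache/icache); see the main.c hunk below. A minimal
sketch of the idea follows; it avoids the "skip_buffer = ++skip_buffer
% 500" double assignment that the patch uses, which is undefined
behaviour in C, and the intended caller (not shown in this excerpt)
presumably skips the writeout whenever the return value is nonzero:

	static unsigned int skip_buffer = 0;

	/* Returns 0 once per 500 calls and nonzero otherwise, so the
	 * real dirty-buffer writeout happens on only 1 in 500
	 * attempts. */
	int comp_cache_skip_buffer_freeing(void)
	{
		skip_buffer = (skip_buffer + 1) % 500;
		return skip_buffer;
	}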
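
The swapin change lives in swapin.c, whose hunk is not included in
this message, so the following is only a sketch of the idea, reusing
PageTestandClearCompCache() and invalidate_comp_cache() as they
appear in the main.c hunk; the fault-handler context (write_access,
page) is assumed:

	/* Illustrative only: on a swapin fault with write access,
	 * drop the compressed copy. The pte will be marked dirty, so
	 * the page is certain to be compressed again on its next
	 * eviction. */
	if (write_access && PageTestandClearCompCache(page))
		invalidate_comp_cache(page->mapping, page->index);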
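
Finally, the cluster-read fix. swapin_readahead() and
read_swap_cache_async() are the stock 2.4 helpers; in_comp_cache() is
a hypothetical lookup used here only for illustration:

	/* Illustrative only: issue the cluster read from the swap
	 * device only when the compressed cache cannot satisfy the
	 * fault directly. */
	if (!in_comp_cache(&swapper_space, entry))
		swapin_readahead(entry);
	page = read_swap_cache_async(entry);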
Index: adaptivity.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v
retrieving revision 1.16
retrieving revision 1.17
diff -C2 -r1.16 -r1.17
*** adaptivity.c 26 Mar 2002 12:35:10 -0000 1.16
--- adaptivity.c 28 Apr 2002 20:51:35 -0000 1.17
***************
*** 2,6 ****
* linux/mm/comp_cache/adaptivity.c
*
! * Time-stamp: <2002-03-26 09:21:19 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/adaptivity.c
*
! * Time-stamp: <2002-04-03 12:33:28 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 18,21 ****
--- 18,168 ----
static int fragment_failed_alloc = 0, vswap_failed_alloc = 0;
+ struct preset_comp_cache * preset_comp_cache;
+ int nr_preset_sizes, current_preset_size;
+
+ static double time_comp = 0.3, time_decomp = 0.2, time_disk_read = 5;
+ int latest_uncomp_misses[10], latest_miss;
+
+ #define comp_cache_used_space ((real_num_comp_pages * PAGE_SIZE) - comp_cache_free_space)
+ #define comp_cache_total_space (preset_comp_cache[i].size * PAGE_SIZE)
+
+ /***
+ * adapt_comp_cache(void) - adapt compressed cache to the recent
+ * behaviour, resizing it if we would have better performance with
+ * another size.
+ *
+ * TODO
+ * - make compressed_ratio variable show the actual ratio
+ * - collect faults by lru region
+ * - account the number of swap cache pages in active and inactive lists?
+ */
+ void
+ adapt_comp_cache(void) {
+ static int nr = 0;
+ int i, best_size, nr_uncomp_misses, uncomp_size, delta_disk_reads, compress_ratio = 2;
+
+ if (++nr % 100)
+ return;
+
+ /* decay miss information */
+ i = (latest_miss + 1) % 10;
+ while (i != latest_miss) {
+ latest_uncomp_misses[i] = 0.8 * latest_uncomp_misses[i];
+ i = (i + 1) % 10;
+ }
+ latest_uncomp_misses[latest_miss] = nr_compressed_cache_misses + nr_swap_misses;
+
+ for (nr_uncomp_misses = 0, i = 0; i < 10; i++)
+ nr_uncomp_misses += latest_uncomp_misses[i];
+
+ latest_miss = (latest_miss + 1) % 10;
+
+ if (!nr_uncomp_misses)
+ return;
+
+ printk("nr_uncomp_misses %d\n", nr_uncomp_misses);
+ printk("free space %ld\n", (comp_cache_free_space * 100)/(real_num_comp_pages * PAGE_SIZE));
+
+ /* compute costs and benefits - smaller sizes*/
+ best_size = current_preset_size;
+ for (i = current_preset_size; i >= 0; i--) {
+ double cost, benefit;
+ int comp_size, delta_real_size;
+
+ comp_size = preset_comp_cache[i].size;
+ uncomp_size = num_physpages - comp_size;
+
+ delta_real_size = (comp_cache_total_space/compress_ratio);
+ printk("size %d real size %d used space %ld\n", preset_comp_cache[i].size, delta_real_size, comp_cache_used_space);
+
+ if (comp_cache_used_space < delta_real_size)
+ delta_disk_reads = 0;
+ else {
+ if (comp_cache_used_space > preset_comp_cache[i].size * PAGE_SIZE) {
+ delta_disk_reads = ((float) comp_size)/preset_comp_cache[current_preset_size].size * nr_compressed_cache_misses;
+ //printk("disk reads 1 %d\n", delta_disk_reads);
+ }
+ else {
+ delta_disk_reads = ((comp_cache_used_space - delta_real_size) * nr_compressed_cache_misses)/comp_cache_used_space;
+ //printk("disk reads 2 %d\n", delta_disk_reads);
+ }
+ }
+
+ cost = (nr_uncomp_misses * comp_size)/preset_comp_cache[current_preset_size].size;
+ printk("cost %d\n", (int) cost);
+ cost *= (time_comp + time_decomp);
+ benefit = delta_disk_reads * (time_disk_read);
+ printk("cost %d benefit %d\n", (int) cost, (int) benefit);
+
+ preset_comp_cache[i].profit = cost - benefit;
+
+ if (preset_comp_cache[i].profit < preset_comp_cache[best_size].profit)
+ best_size = i;
+
+ printk("profit %d -> %d (smaller)\n", i, preset_comp_cache[i].profit);
+ }
+
+ if (comp_cache_free_space > 0.30 * real_num_comp_pages * PAGE_SIZE)
+ goto out;
+
+ /* compute costs and benefits - larger sizes*/
+ for (i = current_preset_size + 1; i < nr_preset_sizes; i++) {
+ double cost, benefit;
+ int comp_size, diff_new_real_old_uncomp, incr_comp_size, scale = 0;
+
+ comp_size = preset_comp_cache[i].size;
+ uncomp_size = num_physpages - comp_size;
+
+ /* new real memory size in function of the new compressed cache size */
+ diff_new_real_old_uncomp = uncomp_size + comp_size/compress_ratio;
+ /* minus the current uncompressed cache */
+ diff_new_real_old_uncomp -= (num_physpages - preset_comp_cache[current_preset_size].size);
+
+ /* unlikely */
+ if (diff_new_real_old_uncomp > 0) {
+ printk("1st case\n");
+ scale = 1;
+ }
+
+ /* we can fill up the new comp cache space */
+ incr_comp_size = preset_comp_cache[i].size - preset_comp_cache[current_preset_size].size;
+ if (swapper_space.nrpages/compress_ratio > incr_comp_size) {
+ printk("fill up\n");
+ scale = 1;
+ }
+
+ printk("nr_compressed_cache_misses %d\n", nr_compressed_cache_misses);
+
+ if (scale)
+ delta_disk_reads = (1 - ((float) diff_new_real_old_uncomp/preset_comp_cache[current_preset_size].size)) * nr_compressed_cache_misses;
+ else {
+ delta_disk_reads = nr_compressed_cache_misses;
+ delta_disk_reads += ((((float) swapper_space.nrpages)/compress_ratio - (incr_comp_size + diff_new_real_old_uncomp)) * nr_compressed_cache_misses)/preset_comp_cache[current_preset_size].size;
+ printk("delta_disk_reads %d\n", delta_disk_reads);
+ }
+
+ cost = nr_uncomp_misses * ((float) preset_comp_cache[i].size/preset_comp_cache[current_preset_size].size);
+ cost *= (time_comp + time_decomp);
+ benefit = delta_disk_reads * (time_disk_read);
+ printk("cost %d benefit %d\n", (int) cost, (int) benefit);
+
+ preset_comp_cache[i].profit = cost - benefit;
+
+ printk("profit %d -> %d (bigger)\n", i, preset_comp_cache[i].profit);
+
+ if (preset_comp_cache[i].profit < preset_comp_cache[best_size].profit)
+ best_size = i;
+ }
+
+
+ out:
+ new_num_comp_pages = preset_comp_cache[best_size].size;
+ current_preset_size = best_size;
+ printk("best size %d\n", best_size);
+
+ /* reset stats */
+ nr_compressed_cache_misses = nr_swap_misses = 0;
+ }
+
void
resize_fragment_hash_table(void) {
***************
*** 417,420 ****
--- 564,568 ----
comp_cache_freeable_space -= PAGE_SIZE;
+ comp_cache_free_space -= PAGE_SIZE;
real_num_comp_pages--;
//printk("shrink new %lu real %lu\n", new_num_comp_pages, real_num_comp_pages);
***************
*** 495,498 ****
--- 643,647 ----
comp_cache_freeable_space += PAGE_SIZE;
+ comp_cache_free_space += PAGE_SIZE;
real_num_comp_pages++;
//printk("grow real %lu\n", real_num_comp_pages);
Index: aux.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v
retrieving revision 1.23
retrieving revision 1.24
diff -C2 -r1.23 -r1.24
*** aux.c 21 Mar 2002 19:24:17 -0000 1.23
--- aux.c 28 Apr 2002 20:51:35 -0000 1.24
***************
*** 2,6 ****
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-03-20 16:48:44 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-04-18 13:00:30 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 293,302 ****
}
- inline int
- comp_cache_free_space(void)
- {
- return 2 * (comp_cache_freeable_space >> PAGE_SHIFT);
- }
-
inline void
print_all_fragments (comp_cache_t * comp_page)
--- 293,296 ----
***************
*** 372,424 ****
}
- void
- comp_cache_update_comp_stats(unsigned short alg_idx, stats_page_t * comp_page_stats)
- {
- compression_algorithm_t * algorithm = &compression_algorithms[alg_idx];
- stats_summary_t * stats = &(algorithm->stats);
-
- stats->pgccout++;
-
- /* update compressed size statistics */
- if (!comp_page_stats->comp_size)
- BUG();
-
- if (comp_page_stats->comp_size < stats->comp_size_min)
- stats->comp_size_min = comp_page_stats->comp_size;
-
- if (comp_page_stats->comp_size > stats->comp_size_max)
- stats->comp_size_max = comp_page_stats->comp_size;
-
- stats->comp_size_sum += comp_page_stats->comp_size;
-
- /* update comp cycles statistics */
- if (comp_page_stats->comp_cycles < stats->comp_cycles_min)
- stats->comp_cycles_min = comp_page_stats->comp_cycles;
-
- if (comp_page_stats->comp_cycles > stats->comp_cycles_max)
- stats->comp_cycles_max = comp_page_stats->comp_cycles;
-
- stats->comp_cycles_sum += comp_page_stats->comp_cycles;
-
- if (((float) comp_page_stats->comp_size/PAGE_SIZE) > DISCARD_MARK)
- stats->discarded_pages++;
- }
-
- void
- comp_cache_update_decomp_stats(unsigned short alg_idx, stats_page_t * comp_page_stats)
- {
- compression_algorithm_t * algorithm = &compression_algorithms[alg_idx];
- stats_summary_t * stats = &(algorithm->stats);
-
- /* update decomp cycles statistics */
- if (comp_page_stats->decomp_cycles < stats->decomp_cycles_min)
- stats->decomp_cycles_min = comp_page_stats->decomp_cycles;
-
- if (comp_page_stats->decomp_cycles > stats->decomp_cycles_max)
- stats->decomp_cycles_max = comp_page_stats->decomp_cycles;
-
- stats->decomp_cycles_sum += comp_page_stats->decomp_cycles;
- }
-
comp_cache_fragment_t **
create_fragment_hash(unsigned long * fragment_hash_size, unsigned int * bits, unsigned int * order) {
--- 366,369 ----
***************
*** 457,461 ****
panic("comp_cache_hash_init(): couldn't allocate fragment hash table\n");
! printk("Compressed Cache: fragment hash table - %lu = %luB\n", fragment_hash_size, (PAGE_SIZE << fragment_hash_order));
/* inits comp cache free space hash table */
--- 402,406 ----
panic("comp_cache_hash_init(): couldn't allocate fragment hash table\n");
! printk("Compressed Cache: fragment hash table - %lu entries = %luB\n", fragment_hash_size, (PAGE_SIZE << fragment_hash_order));
/* inits comp cache free space hash table */
***************
*** 465,469 ****
free_space_hash = vmalloc(free_space_hash_size * sizeof(comp_cache_t *));
! printk("Compressed Cache: free space hash table - %u = %uB\n", free_space_hash_size, free_space_hash_size * sizeof(comp_cache_t *));
if (!free_space_hash)
--- 410,414 ----
free_space_hash = vmalloc(free_space_hash_size * sizeof(comp_cache_t *));
! printk("Compressed Cache: free space hash table - %u entries = %uB\n", free_space_hash_size, free_space_hash_size * sizeof(comp_cache_t *));
if (!free_space_hash)
Index: free.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v
retrieving revision 1.26
retrieving revision 1.27
diff -C2 -r1.26 -r1.27
*** free.c 12 Mar 2002 17:54:20 -0000 1.26
--- free.c 28 Apr 2002 20:51:35 -0000 1.27
***************
*** 2,6 ****
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2002-03-12 12:48:14 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2002-04-02 09:31:32 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 71,74 ****
--- 71,75 ----
list_del_init(&fragment->mapping_list);
fragment->mapping->nrpages--;
+ comp_cache_free_space += fragment->compressed_size;
}
Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.33
retrieving revision 1.34
diff -C2 -r1.33 -r1.34
*** main.c 13 Mar 2002 20:44:33 -0000 1.33
--- main.c 28 Apr 2002 20:51:35 -0000 1.34
***************
*** 2,6 ****
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-03-13 09:05:06 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-04-28 16:51:46 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 16,39 ****
#include <linux/pagemap.h>
- #include <linux/WKcommon.h>
- #include <linux/WKdm.h>
- #include <linux/WK4x4.h>
-
#include <asm/page.h>
#include <net/checksum.h>
- /* data used for compression */
- comp_data_t comp_data;
-
- WK_word compresseddata[1200];
- WK_word decompresseddata[1200];
- WK_word compressedtempTagsArray[300];
- WK_word compressedtempQPosArray[300];
- WK_word compressedtempLowBitsArray[1200];
-
- char compressedhashLookupTable_WKdm [] = HASH_LOOKUP_TABLE_CONTENTS_WKDM;
- unsigned int compressedhashLookupTable_WK4x4 [] = HASH_LOOKUP_TABLE_CONTENTS_WK4x4;
- DictionaryElement compresseddictionary[DICTIONARY_SIZE];
-
/* compressed cache */
unsigned long real_num_comp_pages = 0;
--- 16,22 ----
***************
*** 44,89 ****
unsigned long min_num_comp_pages = 0;
kmem_cache_t * comp_cachep;
kmem_cache_t * fragment_cachep;
! /* compression algorithms */
! compression_algorithm_t compression_algorithms[NUM_ALGORITHMS];
! int current_algorithm;
extern unsigned long num_physpages;
extern comp_cache_t * get_comp_cache_page(struct page *, unsigned short, comp_cache_fragment_t **, int, unsigned int);
- extern void comp_cache_update_comp_stats(unsigned short, stats_page_t *);
! static inline void
! set_fragment_algorithm(comp_cache_fragment_t * fragment, unsigned short algorithm)
{
! switch(algorithm) {
! case WKDM_IDX:
! CompFragmentSetWKdm(fragment);
! break;
! case WK4X4_IDX:
! CompFragmentSetWK4x4(fragment);
! break;
! default:
! BUG();
! }
}
! static inline int
! compress(void * from, void * to, unsigned short * algorithm)
{
! stats_page_t comp_page_stats;
!
! START_ZEN_TIME(comp_page_stats.myTimer);
! comp_page_stats.comp_size = compression_algorithms[current_algorithm].comp(from, to, PAGE_SIZE/4, (void *)(&comp_data));
! STOP_ZEN_TIME(comp_page_stats.myTimer, comp_page_stats.comp_cycles);
!
! /* update some statistics */
! comp_cache_update_comp_stats(current_algorithm, &comp_page_stats);
!
! *algorithm = current_algorithm;
!
! return ((comp_page_stats.comp_size <= PAGE_SIZE)?comp_page_stats.comp_size:PAGE_SIZE);
}
--- 27,75 ----
unsigned long min_num_comp_pages = 0;
+ unsigned long comp_cache_free_space;
+
kmem_cache_t * comp_cachep;
kmem_cache_t * fragment_cachep;
! int nr_swap_misses;
! int nr_compressed_cache_misses;
!
! static unsigned int skip_buffer = 0, skip_slab = 0, skip_dicache = 0;
extern unsigned long num_physpages;
extern comp_cache_t * get_comp_cache_page(struct page *, unsigned short, comp_cache_fragment_t **, int, unsigned int);
! /***
! * Skips a number of dirty buffer writes when we have much memory
! * pressure, since we want the system to put much more pressure on
! * pages from page and swap cache in order to have these kind of pages
! * compressed.
! *
! * TODO: make the number of skips dependent on relative compressed
! * cache size.
! */
! inline int
! comp_cache_skip_buffer_freeing(void)
{
! return (skip_buffer = ++skip_buffer % 500);
}
! /***
! * Same as above, but skips shrinking slab cache (kmem_cache_reap()).
! */
! inline int
! comp_cache_skip_slab_shrunk(void)
{
! return (skip_slab = ++skip_slab % 500);
! }
!
! /***
! * Same as above, but skips shrinking dcache and icache.
! */
! inline int
! comp_cache_skip_dicache_shrunk(void)
! {
! return (skip_dicache = ++skip_dicache % 500);
}
***************
*** 92,97 ****
{
int ret;
!
! if (likely(!shmem_page(page)))
ret = compress_page(page, 1, gfp_mask);
else
--- 78,87 ----
{
int ret;
!
! #ifdef CONFIG_COMP_PAGE_CACHE
! if (!shmem_page(page))
! #else
! if (PageSwapCache(page))
! #endif
ret = compress_page(page, 1, gfp_mask);
else
***************
*** 106,109 ****
--- 96,104 ----
return 0;
+ #ifndef CONFIG_COMP_PAGE_CACHE
+ if (!PageSwapCache(page))
+ return 0;
+ #endif
+
page_cache_get(page);
spin_unlock(&pagecache_lock);
***************
*** 133,137 ****
if (!PageLocked(page))
BUG();
-
if (PageTestandClearCompCache(page)) {
if (!dirty)
--- 128,131 ----
***************
*** 139,151 ****
invalidate_comp_cache(page->mapping, page->index);
}
! comp_size = compress(page_address(current_compressed_page = page), buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm);
comp_page = get_comp_cache_page(page, comp_size, &fragment, dirty, gfp_mask);
/* if comp_page == NULL, get_comp_cache_page() gave up
* reserving a swap entry for this page, so we should return
! * right now, because it won't be compressed. Since it had its
! * dirty bit cleared, we have to set it back, because it is
! * still dirty and must cleaned if needed */
if (!comp_page)
return 0;
--- 133,149 ----
invalidate_comp_cache(page->mapping, page->index);
}
+
+ #ifdef CONFIG_COMP_ADAPTIVITY
+ adapt_comp_cache();
+ #endif
! comp_size = compress(current_compressed_page = page, buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm);
comp_page = get_comp_cache_page(page, comp_size, &fragment, dirty, gfp_mask);
/* if comp_page == NULL, get_comp_cache_page() gave up
* reserving a swap entry for this page, so we should return
! * right now, because it won't be compressed. Its dirty bit
! * has been set back in get_comp_cache_page() since it's still
! * dirty and needs to be cleaned. */
if (!comp_page)
return 0;
***************
*** 158,162 ****
if (compressed(fragment)) {
if (current_compressed_page != page)
! compress(page_address(page), buffer_compressed = (unsigned long *) &buffer_compressed2, &algorithm);
memcpy(page_address(comp_page->page) + fragment->offset, buffer_compressed , fragment->compressed_size);
} else
--- 156,160 ----
if (compressed(fragment)) {
if (current_compressed_page != page)
! compress(page, buffer_compressed = (unsigned long *) &buffer_compressed2, &algorithm);
memcpy(page_address(comp_page->page) + fragment->offset, buffer_compressed , fragment->compressed_size);
} else
***************
*** 175,194 ****
extern void __init comp_cache_vswap_init(void);
- #define RESET_STATS \
- do { \
- compression_algorithms[current_algorithm].stats.comp_size_sum = 0; \
- compression_algorithms[current_algorithm].stats.comp_size_max = 0; \
- compression_algorithms[current_algorithm].stats.comp_size_min = INF; \
- compression_algorithms[current_algorithm].stats.comp_cycles_sum = 0; \
- compression_algorithms[current_algorithm].stats.comp_cycles_max = 0; \
- compression_algorithms[current_algorithm].stats.comp_cycles_min = INF; \
- compression_algorithms[current_algorithm].stats.decomp_cycles_sum = 0; \
- compression_algorithms[current_algorithm].stats.decomp_cycles_max = 0; \
- compression_algorithms[current_algorithm].stats.decomp_cycles_min = INF; \
- compression_algorithms[current_algorithm].stats.pgccin = 0; \
- compression_algorithms[current_algorithm].stats.pgccout = 0; \
- compression_algorithms[current_algorithm].stats.discarded_pages = 0; } \
- while (0)
-
LIST_HEAD(lru_queue);
--- 173,176 ----
***************
*** 209,234 ****
int i;
! max_num_comp_pages = num_physpages/2;
!
! if (!init_num_comp_pages)
! init_num_comp_pages = CONFIG_COMP_CACHE_SIZE;
if (!init_num_comp_pages || init_num_comp_pages > max_num_comp_pages)
! init_num_comp_pages = 512;
!
! new_num_comp_pages = real_num_comp_pages = init_num_comp_pages;
! min_num_comp_pages = 0;
!
! printk("Compressed Cache: starting %s - %lu pages = %luKiB\n", COMP_CACHE_VERSION, init_num_comp_pages, (init_num_comp_pages * PAGE_SIZE)/1024);
!
! /* initialize our data for the `test' compressed_page */
! comp_data.compressed_data = compresseddata;
! comp_data.decompressed_data = decompresseddata;
! comp_data.hashLookupTable_WKdm = compressedhashLookupTable_WKdm;
! comp_data.hashLookupTable_WK4x4 = compressedhashLookupTable_WK4x4;
! comp_data.dictionary = compresseddictionary;
! comp_data.tempTagsArray = compressedtempTagsArray;
! comp_data.tempQPosArray = compressedtempQPosArray;
! comp_data.tempLowBitsArray = compressedtempLowBitsArray;
/* create slab caches */
--- 191,227 ----
int i;
! max_num_comp_pages = num_physpages * 0.5;
+ #ifndef CONFIG_COMP_ADAPTIVITY
if (!init_num_comp_pages || init_num_comp_pages > max_num_comp_pages)
! #endif
! init_num_comp_pages = num_physpages * 0.10;
! new_num_comp_pages = min_num_comp_pages = real_num_comp_pages = init_num_comp_pages;
!
! printk("Compressed Cache: starting %s version\n", COMP_CACHE_VERSION);
!
! /* adaptivity */
! nr_swap_misses = 0;
! nr_compressed_cache_misses = 0;
!
! nr_preset_sizes = 4;
! preset_comp_cache = (struct preset_comp_cache *) kmalloc(nr_preset_sizes * sizeof(*preset_comp_cache), GFP_ATOMIC);
!
! #ifdef CONFIG_COMP_ADAPTIVITY
! printk("Compressed Cache: adaptivity\n");
! preset_comp_cache[0].size = num_physpages * 0.10;
! preset_comp_cache[1].size = num_physpages * 0.23;
! preset_comp_cache[2].size = num_physpages * 0.37;
! preset_comp_cache[3].size = num_physpages * 0.50;
!
! for (i = 0; i < nr_preset_sizes; i++)
! printk("Compressed Cache: preset size %d: %u memory pages\n", i, preset_comp_cache[i].size);
!
! for (i = 0; i < 10; i++)
! latest_uncomp_misses[i] = 0;
! latest_miss = 0;
! #else
! printk("Compressed Cache: initial size - %lu pages = %luKiB\n", init_num_comp_pages, (init_num_comp_pages * PAGE_SIZE)/1024);
! #endif
/* create slab caches */
***************
*** 249,277 ****
init_comp_page(&comp_page, page);
}
/* initialize our algorithms statistics array */
! for (current_algorithm = 0; current_algorithm < NUM_ALGORITHMS; current_algorithm++)
! RESET_STATS;
!
! strcpy(compression_algorithms[WKDM_IDX].name, "WKdm");
! compression_algorithms[WKDM_IDX].comp = WKdm_compress;
! compression_algorithms[WKDM_IDX].decomp = WKdm_decompress;
!
! strcpy(compression_algorithms[WK4X4_IDX].name, "WK4x4");
! compression_algorithms[WK4X4_IDX].comp = WK4x4_compress;
! compression_algorithms[WK4X4_IDX].decomp = WK4x4_decompress;
!
! current_algorithm = WKDM_IDX;
}
static int __init comp_cache_size(char *str)
{
! char * endp;
!
! init_num_comp_pages = simple_strtoul(str, &endp, 0);
! return 1;
}
__setup("compsize=", comp_cache_size);
/*
--- 242,262 ----
init_comp_page(&comp_page, page);
}
+ comp_cache_free_space = real_num_comp_pages * PAGE_SIZE;
/* initialize our algorithms statistics array */
! comp_cache_algorithms_init();
}
+ #ifndef CONFIG_COMP_ADAPTIVITY
static int __init comp_cache_size(char *str)
{
! char * endp;
!
! init_num_comp_pages = simple_strtoul(str, &endp, 0);
! return 1;
}
__setup("compsize=", comp_cache_size);
+ #endif
/*
Index: proc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/proc.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -C2 -r1.6 -r1.7
*** proc.c 28 Dec 2001 21:45:24 -0000 1.6
--- proc.c 28 Apr 2002 20:51:35 -0000 1.7
***************
*** 2,6 ****
* linux/mm/comp_cache/proc.c
*
! * Time-stamp: <2001-12-27 15:35:38 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/proc.c
*
! * Time-stamp: <2002-04-18 15:32:34 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 13,18 ****
#include <linux/sysctl.h>
extern unsigned long new_num_comp_pages, max_num_comp_pages, min_num_comp_pages;
! int algorithm_min = WKDM_IDX<WK4X4_IDX?WKDM_IDX:WK4X4_IDX, algorithm_max = WKDM_IDX>WK4X4_IDX?WKDM_IDX:WK4X4_IDX;
enum
--- 13,43 ----
#include <linux/sysctl.h>
+ #include <linux/WKcommon.h>
+ #include <linux/WKdm.h>
+ #include <linux/WK4x4.h>
+
+ #define NUM_ALGORITHMS 2
+ #define WKDM_IDX 0
+ #define WK4X4_IDX 1
+
extern unsigned long new_num_comp_pages, max_num_comp_pages, min_num_comp_pages;
!
! static compression_algorithm_t compression_algorithms[NUM_ALGORITHMS];
! static int algorithm_min = WKDM_IDX;
! static int algorithm_max = WK4X4_IDX;
! static int current_algorithm;
!
! /* data used for compression */
! static comp_data_t comp_data;
!
! static WK_word compresseddata[1200];
! static WK_word decompresseddata[1200];
! static WK_word compressedtempTagsArray[300];
! static WK_word compressedtempQPosArray[300];
! static WK_word compressedtempLowBitsArray[1200];
!
! static char compressedhashLookupTable_WKdm [] = HASH_LOOKUP_TABLE_CONTENTS_WKDM;
! static unsigned int compressedhashLookupTable_WK4x4 [] = HASH_LOOKUP_TABLE_CONTENTS_WK4x4;
! static DictionaryElement compresseddictionary[DICTIONARY_SIZE];
enum
***************
*** 32,35 ****
--- 57,217 ----
};
+ static void
+ comp_cache_update_comp_stats(stats_page_t * comp_page_stats, struct page * page)
+ {
+ compression_algorithm_t * algorithm = &compression_algorithms[current_algorithm];
+ stats_summary_t * stats = &(algorithm->stats);
+
+ /* update compressed size statistics */
+ if (!comp_page_stats->comp_size)
+ BUG();
+
+ if (comp_page_stats->comp_size < stats->comp_size_min)
+ stats->comp_size_min = comp_page_stats->comp_size;
+
+ if (comp_page_stats->comp_size > stats->comp_size_max)
+ stats->comp_size_max = comp_page_stats->comp_size;
+
+ stats->comp_size_sum += comp_page_stats->comp_size;
+
+ /* update comp cycles statistics */
+ if (comp_page_stats->comp_cycles < stats->comp_cycles_min)
+ stats->comp_cycles_min = comp_page_stats->comp_cycles;
+
+ if (comp_page_stats->comp_cycles > stats->comp_cycles_max)
+ stats->comp_cycles_max = comp_page_stats->comp_cycles;
+
+ stats->comp_cycles_sum += comp_page_stats->comp_cycles;
+
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ if (!PageSwapCache(page))
+ compression_algorithms[current_algorithm].stats.comp_page++;
+ else
+ #endif
+ compression_algorithms[current_algorithm].stats.comp_swap++;
+ }
+
+ static void
+ comp_cache_update_decomp_stats(unsigned short alg_idx, stats_page_t * comp_page_stats, comp_cache_fragment_t * fragment)
+ {
+ compression_algorithm_t * algorithm = &compression_algorithms[alg_idx];
+ stats_summary_t * stats = &(algorithm->stats);
+
+ /* update decomp cycles statistics */
+ if (comp_page_stats->decomp_cycles < stats->decomp_cycles_min)
+ stats->decomp_cycles_min = comp_page_stats->decomp_cycles;
+
+ if (comp_page_stats->decomp_cycles > stats->decomp_cycles_max)
+ stats->decomp_cycles_max = comp_page_stats->decomp_cycles;
+
+ stats->decomp_cycles_sum += comp_page_stats->decomp_cycles;
+
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ if (!PageSwapCache(fragment))
+ compression_algorithms[current_algorithm].stats.decomp_page++;
+ else
+ #endif
+ compression_algorithms[current_algorithm].stats.decomp_swap++;
+ }
+
+ void
+ comp_cache_update_writeout_stats(comp_cache_fragment_t * fragment)
+ {
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ if (!PageSwapCache(fragment))
+ compression_algorithms[current_algorithm].stats.page_out++;
+ else
+ #endif
+ compression_algorithms[current_algorithm].stats.swap_out++;
+ }
+
+ void
+ comp_cache_update_faultin_stats(comp_cache_fragment_t * fragment)
+ {
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ if (!PageSwapCache(fragment))
+ compression_algorithms[current_algorithm].stats.faultin_page++;
+ else
+ #endif
+ compression_algorithms[current_algorithm].stats.faultin_swap++;
+ }
+
+ void
+ set_fragment_algorithm(comp_cache_fragment_t * fragment, unsigned short algorithm)
+ {
+ if (algorithm == WKDM...
[truncated message content] |