linuxcompressed-checkins Mailing List for Linux Compressed Cache (Page 9)
Status: Beta
Brought to you by:
nitin_sf
You can subscribe to this list here.
| 2001 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
|
Oct
(2) |
Nov
|
Dec
(31) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2002 |
Jan
(28) |
Feb
(50) |
Mar
(29) |
Apr
(6) |
May
(33) |
Jun
(36) |
Jul
(60) |
Aug
(7) |
Sep
(12) |
Oct
|
Nov
(13) |
Dec
(3) |
| 2003 |
Jan
|
Feb
|
Mar
|
Apr
|
May
(9) |
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
| 2006 |
Jan
(13) |
Feb
(4) |
Mar
(4) |
Apr
(1) |
May
|
Jun
(22) |
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-23 13:52:47
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv28265/mm/comp_cache
Modified Files:
main.c
Log Message:
- our kernel parameter "compsize=" will accept as input the same values as
mem=, so you can enter the number of megabytes, kilobytes or even gigabytes
(although a compressed cache cannot have that size) without having to
convert this number into the number of memory pages. For example,
compsize=16M or compsize=1024K will work now.
Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.37
retrieving revision 1.38
diff -C2 -r1.37 -r1.38
*** main.c 21 May 2002 18:49:06 -0000 1.37
--- main.c 23 May 2002 13:52:44 -0000 1.38
***************
*** 2,6 ****
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-05-21 12:40:53 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-05-23 10:38:17 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 225,230 ****
{
char * endp;
! init_num_comp_pages = simple_strtoul(str, &endp, 0);
return 1;
}
--- 225,233 ----
{
char * endp;
+ unsigned long long comp_cache_size; /* size in bytes */
! comp_cache_size = memparse(str, &endp);
! init_num_comp_pages = comp_cache_size >> PAGE_SHIFT;
!
return 1;
}
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-21 20:33:28
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv4225/mm/comp_cache
Modified Files:
swapout.c
Log Message:
- find_free_swp_buffer() cleanups
- Fix a bug that could corrupt the fragment->mapping->locked_pages list. The
swap buffer page is removed from the list, but if it happens that its buffer
can't be freed, it's not added back (and will certainly be removed again).
Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.43
retrieving revision 1.44
diff -C2 -r1.43 -r1.44
*** swapout.c 21 May 2002 18:49:06 -0000 1.43
--- swapout.c 21 May 2002 20:33:25 -0000 1.44
***************
*** 2,6 ****
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-05-21 11:23:59 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-05-21 16:52:13 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 38,41 ****
--- 38,42 ----
struct list_head * swp_buffer_lh, * tmp_lh;
struct swp_buffer * swp_buffer;
+ comp_cache_fragment_t * old_fragment;
int wait;
***************
*** 52,61 ****
list_for_each_safe(swp_buffer_lh, tmp_lh, &swp_used_buffer_head) {
swp_buffer = list_entry(swp_buffer_lh, struct swp_buffer, list);
! if (TryLockPage(swp_buffer->page)) {
if (!wait)
continue;
list_del_init(swp_buffer_lh);
! lock_page(swp_buffer->page);
}
--- 53,64 ----
list_for_each_safe(swp_buffer_lh, tmp_lh, &swp_used_buffer_head) {
swp_buffer = list_entry(swp_buffer_lh, struct swp_buffer, list);
+ buffer_page = swp_buffer->page;
+ old_fragment = swp_buffer->fragment;
! if (TryLockPage(buffer_page)) {
if (!wait)
continue;
list_del_init(swp_buffer_lh);
! lock_page(buffer_page);
}
***************
*** 63,69 ****
* right before being returned to the caller, so let's
* remove it now from any mapping->*_pages list */
! list_del(&swp_buffer->page->list);
! if (swp_buffer->page->buffers) {
unsigned int gfp_mask_buffer = gfp_mask;
--- 66,72 ----
* right before being returned to the caller, so let's
* remove it now from any mapping->*_pages list */
! list_del(&buffer_page->list);
! if (buffer_page->buffers) {
unsigned int gfp_mask_buffer = gfp_mask;
***************
*** 76,82 ****
gfp_mask_buffer &= ~__GFP_IO;
list_del_init(swp_buffer_lh);
! if (!try_to_free_buffers(swp_buffer->page, gfp_mask_buffer)) {
list_add_tail(swp_buffer_lh, &swp_used_buffer_head);
! UnlockPage(swp_buffer->page);
continue;
}
--- 79,87 ----
gfp_mask_buffer &= ~__GFP_IO;
list_del_init(swp_buffer_lh);
! if (!try_to_free_buffers(buffer_page, gfp_mask_buffer)) {
list_add_tail(swp_buffer_lh, &swp_used_buffer_head);
!
! list_add(&buffer_page->list, &old_fragment->mapping->locked_comp_pages);
! UnlockPage(buffer_page);
continue;
}
***************
*** 85,90 ****
/* has the fragment we are swapping out been swapped
* in? so let's free only the fragment struct */
! if (!CompFragmentIO(swp_buffer->fragment)) {
! kmem_cache_free(fragment_cachep, (swp_buffer->fragment));
goto out;
}
--- 90,95 ----
/* has the fragment we are swapping out been swapped
* in? so let's free only the fragment struct */
! if (!CompFragmentIO(old_fragment)) {
! kmem_cache_free(fragment_cachep, (old_fragment));
goto out;
}
***************
*** 92,103 ****
/* in the case it is waiting for merge in
* comp_cache_free(), we can't free it */
! if (!swp_buffer->fragment->mapping) {
! CompFragmentClearIO(swp_buffer->fragment);
goto out;
}
/* it's not swapped out, so let' free it */
! CompFragmentClearIO(swp_buffer->fragment);
! comp_cache_free(swp_buffer->fragment);
out:
--- 97,108 ----
/* in the case it is waiting for merge in
* comp_cache_free(), we can't free it */
! if (!old_fragment->mapping) {
! CompFragmentClearIO(old_fragment);
goto out;
}
/* it's not swapped out, so let' free it */
! CompFragmentClearIO(old_fragment);
! comp_cache_free(old_fragment);
out:
***************
*** 111,119 ****
if (!CompFragmentIO(fragment) || CompFragmentFreed(fragment)) {
CompFragmentClearIO(fragment);
! UnlockPage(swp_buffer->page);
return NULL;
}
! UnlockPage(swp_buffer->page);
if (wait)
--- 116,124 ----
if (!CompFragmentIO(fragment) || CompFragmentFreed(fragment)) {
CompFragmentClearIO(fragment);
! UnlockPage(buffer_page);
return NULL;
}
! UnlockPage(buffer_page);
if (wait)
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-21 18:49:11
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv1879/mm/comp_cache
Modified Files:
aux.c main.c proc.c swapin.c swapout.c
Log Message:
- New statistics for compressed dirty and clean pages
- Cleanups regarding comp_cache_skip_* prototypes. These functions will no
longer be used in compressed cache since they impose fundamental changes
upon the VM that aren't worth it and would require long time researching and
testing.
- Some cosmetic changes regarding the information printed on boot process.
- Now we fix the normal zone watermarks when compressed cache is initialized
to make sure we don't put too much (and unneeded) pressure on the
uncompressed cache. For example, on a system with 48M RAM and 24M compressed
cache, the watermarks on the remaining 24M uncompressed cache will have been
wrongly computed on the 48M total memory. Now we make sure that the
watermarks are computed on the correct amount of uncompressed cache
available memory. This information (old and new watermarks) is printed along
with the initial compressed cache boot messages.
- Clean CompCache bit for freed pages in lookup_all_comp_pages().
- Make sure we first try to get a swap buffer without syncing its buffers to
disk. Earlier, we were syncing any dirty buffer as soon as we could lock a
page. Now that's done only in case we couldn't get any page without syncing
its buffer in order to free them. That's done cleaning up the __GFP_IO flag
if we are in the "don't sync" (= !wait) stage.
Index: aux.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v
retrieving revision 1.25
retrieving revision 1.26
diff -C2 -r1.25 -r1.26
*** aux.c 15 May 2002 18:05:36 -0000 1.25
--- aux.c 21 May 2002 18:49:06 -0000 1.26
***************
*** 2,6 ****
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-05-15 12:31:27 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-05-21 12:34:55 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 402,406 ****
panic("comp_cache_hash_init(): couldn't allocate fragment hash table\n");
! printk("Compressed Cache: fragment hash table - %lu entries = %luB\n", fragment_hash_size, (PAGE_SIZE << fragment_hash_order));
/* inits comp cache free space hash table */
--- 402,407 ----
panic("comp_cache_hash_init(): couldn't allocate fragment hash table\n");
! printk("Compressed Cache: hash table\n"
! "Compressed Cache: fragment (%lu entries = %luB)\n", fragment_hash_size, (PAGE_SIZE << fragment_hash_order));
/* inits comp cache free space hash table */
***************
*** 410,414 ****
free_space_hash = vmalloc(free_space_hash_size * sizeof(comp_cache_t *));
! printk("Compressed Cache: free space hash table - %u entries = %uB\n", free_space_hash_size, free_space_hash_size * sizeof(comp_cache_t *));
if (!free_space_hash)
--- 411,415 ----
free_space_hash = vmalloc(free_space_hash_size * sizeof(comp_cache_t *));
! printk("Compressed Cache: free space (%u entries = %uB)\n", free_space_hash_size, free_space_hash_size * sizeof(comp_cache_t *));
if (!free_space_hash)
Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.36
retrieving revision 1.37
diff -C2 -r1.36 -r1.37
*** main.c 15 May 2002 18:05:36 -0000 1.36
--- main.c 21 May 2002 18:49:06 -0000 1.37
***************
*** 2,6 ****
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-05-15 10:06:22 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-05-21 12:40:53 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 105,109 ****
#endif
! comp_size = compress(current_compressed_page = page, buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm);
comp_page = get_comp_cache_page(page, comp_size, &fragment, dirty, gfp_mask);
--- 105,109 ----
#endif
! comp_size = compress(current_compressed_page = page, buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm, dirty);
comp_page = get_comp_cache_page(page, comp_size, &fragment, dirty, gfp_mask);
***************
*** 123,127 ****
if (compressed(fragment)) {
if (current_compressed_page != page)
! compress(page, buffer_compressed = (unsigned long *) &buffer_compressed2, &algorithm);
memcpy(page_address(comp_page->page) + fragment->offset, buffer_compressed , fragment->compressed_size);
} else
--- 123,127 ----
if (compressed(fragment)) {
if (current_compressed_page != page)
! compress(page, buffer_compressed = (unsigned long *) &buffer_compressed2, &algorithm, dirty);
memcpy(page_address(comp_page->page) + fragment->offset, buffer_compressed , fragment->compressed_size);
} else
***************
*** 151,154 ****
--- 151,156 ----
}
+ extern void comp_cache_fix_watermarks(int num_comp_pages);
+
void __init
comp_cache_init(void)
***************
*** 166,170 ****
new_num_comp_pages = min_num_comp_pages = num_comp_pages = init_num_comp_pages;
! printk("Compressed Cache: starting %s version\n", COMP_CACHE_VERSION);
/* adaptivity */
--- 168,172 ----
new_num_comp_pages = min_num_comp_pages = num_comp_pages = init_num_comp_pages;
! printk("Compressed Cache: %s\n", COMP_CACHE_VERSION);
/* adaptivity */
***************
*** 183,187 ****
for (i = 0; i < nr_preset_sizes; i++)
! printk("Compressed Cache: preset size %d: %u memory pages\n", i, preset_comp_cache[i].size);
for (i = 0; i < 10; i++)
--- 185,189 ----
for (i = 0; i < nr_preset_sizes; i++)
! printk("Compressed Cache: preset size %d: %u memory pages\n", i, preset_comp_cache[i].size);
for (i = 0; i < 10; i++)
***************
*** 189,194 ****
latest_miss = 0;
#else
! printk("Compressed Cache: initial size - %lu pages = %luKiB\n", init_num_comp_pages, (init_num_comp_pages * PAGE_SIZE)/1024);
#endif
/* create slab caches */
--- 191,200 ----
latest_miss = 0;
#else
! printk("Compressed Cache: initial size\n"
! "Compressed Cache: %lu pages = %luKiB\n", init_num_comp_pages, (init_num_comp_pages * PAGE_SIZE)/1024);
#endif
+
+ /* fiz zone watermarks */
+ comp_cache_fix_watermarks(init_num_comp_pages);
/* create slab caches */
Index: proc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/proc.c,v
retrieving revision 1.8
retrieving revision 1.9
diff -C2 -r1.8 -r1.9
*** proc.c 15 May 2002 18:05:36 -0000 1.8
--- proc.c 21 May 2002 18:49:06 -0000 1.9
***************
*** 2,6 ****
* linux/mm/comp_cache/proc.c
*
! * Time-stamp: <2002-05-15 10:40:40 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/proc.c
*
! * Time-stamp: <2002-05-16 14:11:49 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 58,62 ****
static void
! comp_cache_update_comp_stats(stats_page_t * comp_page_stats, struct page * page)
{
compression_algorithm_t * algorithm = &compression_algorithms[current_algorithm];
--- 58,62 ----
static void
! comp_cache_update_comp_stats(stats_page_t * comp_page_stats, struct page * page, int dirty)
{
compression_algorithm_t * algorithm = &compression_algorithms[current_algorithm];
***************
*** 90,93 ****
--- 90,97 ----
#endif
compression_algorithms[current_algorithm].stats.comp_swap++;
+ if (dirty)
+ compression_algorithms[current_algorithm].stats.comp_dirty++;
+ else
+ compression_algorithms[current_algorithm].stats.comp_clean++;
}
***************
*** 147,151 ****
int
! compress(struct page * page, void * to, unsigned short * algorithm)
{
stats_page_t comp_page_stats;
--- 151,155 ----
int
! compress(struct page * page, void * to, unsigned short * algorithm, int dirty)
{
stats_page_t comp_page_stats;
***************
*** 155,159 ****
comp_page_stats.comp_size = compression_algorithms[current_algorithm].comp(from, to, PAGE_SIZE/4, (void *)(&comp_data));
STOP_ZEN_TIME(comp_page_stats.myTimer, comp_page_stats.comp_cycles);
! comp_cache_update_comp_stats(&comp_page_stats, page);
*algorithm = current_algorithm;
--- 159,163 ----
comp_page_stats.comp_size = compression_algorithms[current_algorithm].comp(from, to, PAGE_SIZE/4, (void *)(&comp_data));
STOP_ZEN_TIME(comp_page_stats.myTimer, comp_page_stats.comp_cycles);
! comp_cache_update_comp_stats(&comp_page_stats, page, dirty);
*algorithm = current_algorithm;
***************
*** 253,256 ****
--- 257,262 ----
" Swap Cache: %8lu\n"
" Page Cache: %8lu\n"
+ " Dirty: %8lu\n"
+ " Clean: %8lu\n"
"Decompressed Pages: %8lu\n"
" Swap Cache: %8lu\n"
***************
*** 265,268 ****
--- 271,276 ----
stats->comp_swap,
stats->comp_page,
+ stats->comp_dirty,
+ stats->comp_clean,
total_decomp_pages,
stats->decomp_swap,
Index: swapin.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v
retrieving revision 1.34
retrieving revision 1.35
diff -C2 -r1.34 -r1.35
*** swapin.c 2 May 2002 16:31:37 -0000 1.34
--- swapin.c 21 May 2002 18:49:06 -0000 1.35
***************
*** 2,6 ****
* linux/mm/comp_cache/swapin.c
*
! * Time-stamp: <2002-04-30 11:36:19 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/swapin.c
*
! * Time-stamp: <2002-05-21 11:27:43 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 208,213 ****
decompress_fragment(fragment, page);
comp_cache_free_locked(fragment);
!
! __set_page_dirty(page);
page_cache_release(page);
--- 208,214 ----
decompress_fragment(fragment, page);
comp_cache_free_locked(fragment);
!
! PageClearCompCache(page);
! __set_page_dirty(page);
page_cache_release(page);
Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.42
retrieving revision 1.43
diff -C2 -r1.42 -r1.43
*** swapout.c 15 May 2002 18:05:36 -0000 1.42
--- swapout.c 21 May 2002 18:49:06 -0000 1.43
***************
*** 2,6 ****
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-05-15 10:07:24 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-05-21 11:23:59 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 66,71 ****
if (swp_buffer->page->buffers) {
list_del_init(swp_buffer_lh);
! if (!try_to_free_buffers(swp_buffer->page, gfp_mask)) {
list_add_tail(swp_buffer_lh, &swp_used_buffer_head);
UnlockPage(swp_buffer->page);
--- 66,80 ----
if (swp_buffer->page->buffers) {
+ unsigned int gfp_mask_buffer = gfp_mask;
+
+ /***
+ * we would like to try freeing the buffers
+ * without syncing them to disk at this first
+ * try, so let's get rid of __GFP_IO flag
+ */
+ if (!wait)
+ gfp_mask_buffer &= ~__GFP_IO;
list_del_init(swp_buffer_lh);
! if (!try_to_free_buffers(swp_buffer->page, gfp_mask_buffer)) {
list_add_tail(swp_buffer_lh, &swp_used_buffer_head);
UnlockPage(swp_buffer->page);
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-21 18:49:10
|
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv1879/include/linux
Modified Files:
comp_cache.h
Log Message:
- New statistics for compressed dirty and clean pages
- Cleanups regarding comp_cache_skip_* prototypes. These functions will no
longer be used in compressed cache since they impose fundamental changes
upon the VM that aren't worth it and would require long time researching and
testing.
- Some cosmetic changes regarding the information printed on boot process.
- Now we fix the normal zone watermarks when compressed cache is initialized
to make sure we don't put too much (and unneeded) pressure on the
uncompressed cache. For example, on a system with 48M RAM and 24M compressed
cache, the watermarks on the remaining 24M uncompressed cache will have been
wrongly computed on the 48M total memory. Now we make sure that the
watermarks are computed on the correct amount of uncompressed cache
available memory. This information (old and new watermarks) is printed along
with the initial compressed cache boot messages.
- Clean CompCache bit for freed pages in lookup_all_comp_pages().
- Make sure we first try to get a swap buffer without syncing its buffers to
disk. Earlier, we were syncing any dirty buffer as soon as we could lock a
page. Now that's done only in case we couldn't get any page without syncing
its buffer in order to free them. That's done cleaning up the __GFP_IO flag
if we are in the "don't sync" (= !wait) stage.
Index: comp_cache.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v
retrieving revision 1.66
retrieving revision 1.67
diff -C2 -r1.66 -r1.67
*** comp_cache.h 15 May 2002 18:05:35 -0000 1.66
--- comp_cache.h 21 May 2002 18:49:06 -0000 1.67
***************
*** 2,6 ****
* linux/mm/comp_cache.h
*
! * Time-stamp: <2002-05-15 10:32:02 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache.h
*
! * Time-stamp: <2002-05-21 15:10:13 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 29,33 ****
#include <linux/WKcommon.h>
! #define COMP_CACHE_VERSION "0.23pre3"
/* maximum compressed size of a page */
--- 29,33 ----
#include <linux/WKcommon.h>
! #define COMP_CACHE_VERSION "0.23pre4"
/* maximum compressed size of a page */
***************
*** 243,246 ****
--- 243,247 ----
unsigned long comp_swap, decomp_swap;
unsigned long comp_page, decomp_page;
+ unsigned long comp_dirty, comp_clean;
unsigned long swap_out, page_out;
unsigned long faultin_swap, faultin_page;
***************
*** 298,302 ****
void set_fragment_algorithm(comp_cache_fragment_t *, unsigned short);
void decompress(comp_cache_fragment_t *, struct page *);
! int compress(struct page *, void *, unsigned short *);
void __init comp_cache_algorithms_init(void);
--- 299,303 ----
void set_fragment_algorithm(comp_cache_fragment_t *, unsigned short);
void decompress(comp_cache_fragment_t *, struct page *);
! int compress(struct page *, void *, unsigned short *, int);
void __init comp_cache_algorithms_init(void);
***************
*** 330,337 ****
/* main.c */
#ifdef CONFIG_COMP_CACHE
- inline int comp_cache_skip_buffer_freeing(void);
- inline int comp_cache_skip_slab_shrunk(void);
- inline int comp_cache_skip_dicache_shrunk(void);
-
int compress_page(struct page *, int, unsigned int);
void comp_cache_init(void);
--- 331,334 ----
***************
*** 350,357 ****
#else
- static inline int comp_cache_skip_buffer_freeing(void) { return 0; }
- static inline int comp_cache_skip_slab_shrunk(void) { return 0; }
- static inline int comp_cache_skip_dicache_shrunk(void) { return 0; }
-
static inline void comp_cache_init(void) {};
static inline int compress_dirty_page(struct page * page, int (*writepage)(struct page *), unsigned int gfp_mask) { return writepage(page); }
--- 347,350 ----
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-21 18:49:10
|
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv1879/mm
Modified Files:
memory.c page_alloc.c
Log Message:
- New statistics for compressed dirty and clean pages
- Cleanups regarding comp_cache_skip_* prototypes. These functions will no
longer be used in compressed cache since they impose fundamental changes
upon the VM that aren't worth it and would require long time researching and
testing.
- Some cosmetic changes regarding the information printed on boot process.
- Now we fix the normal zone watermarks when compressed cache is initialized
to make sure we don't put too much (and unneeded) pressure on the
uncompressed cache. For example, on a system with 48M RAM and 24M compressed
cache, the watermarks on the remaining 24M uncompressed cache will have been
wrongly computed on the 48M total memory. Now we make sure that the
watermarks are computed on the correct amount of uncompressed cache
available memory. This information (old and new watermarks) is printed along
with the initial compressed cache boot messages.
- Clean CompCache bit for freed pages in lookup_all_comp_pages().
- Make sure we first try to get a swap buffer without syncing its buffers to
disk. Earlier, we were syncing any dirty buffer as soon as we could lock a
page. Now that's done only in case we couldn't get any page without syncing
its buffer in order to free them. That's done cleaning up the __GFP_IO flag
if we are in the "don't sync" (= !wait) stage.
Index: memory.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v
retrieving revision 1.28
retrieving revision 1.29
diff -C2 -r1.28 -r1.29
*** memory.c 2 May 2002 16:31:37 -0000 1.28
--- memory.c 21 May 2002 18:49:06 -0000 1.29
***************
*** 1141,1145 ****
/* major fault */
ret = 2;
! }
page = read_swap_cache_async(entry);
if (!page) {
--- 1141,1145 ----
/* major fault */
ret = 2;
! }
page = read_swap_cache_async(entry);
if (!page) {
Index: page_alloc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/page_alloc.c,v
retrieving revision 1.16
retrieving revision 1.17
diff -C2 -r1.16 -r1.17
*** page_alloc.c 26 Feb 2002 20:59:01 -0000 1.16
--- page_alloc.c 21 May 2002 18:49:06 -0000 1.17
***************
*** 626,629 ****
--- 626,659 ----
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
+ #ifdef CONFIG_COMP_CACHE
+ void comp_cache_fix_watermarks(int num_comp_pages)
+ {
+ unsigned long mask;
+ int j = ZONE_NORMAL;
+ zone_t *zone = contig_page_data.node_zones + j;
+
+ if (num_comp_pages > zone->size)
+ num_comp_pages = zone->size;
+
+ /* whoops: that should be zone->size minus zholes. Since
+ * zholes is always 0 when calling free_area_init_core(), I
+ * guess we don't have to worry about that now */
+ mask = ((zone->size - num_comp_pages)/zone_balance_ratio[j]);
+
+ if (mask < zone_balance_min[j])
+ mask = zone_balance_min[j];
+ else if (mask > zone_balance_max[j])
+ mask = zone_balance_max[j];
+
+ printk("Compressed Cache: page watermarks (normal zone)\n"
+ "Compressed Cache: (%lu, %lu, %lu) -> ",
+ zone->pages_min, zone->pages_low, zone->pages_high);
+ zone->pages_min = mask;
+ zone->pages_low = mask*2;
+ zone->pages_high = mask*3;
+ printk("(%lu, %lu, %lu)\n", zone->pages_min, zone->pages_low, zone->pages_high);
+ }
+ #endif
+
/*
* Set up the zone data structures:
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-15 18:05:39
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv11239/mm/comp_cache
Modified Files:
adaptivity.c aux.c free.c main.c proc.c swapout.c vswap.c
Log Message:
- Fix meminfo output when compressed cache is enabled. Thus, cached memory
and swap cached memory actually corresponds to the cache and swap cached
memory in non-compressed cache. Besides, adds two lines to its output with
information about compressed cache memory consumption like how much memory
was reserved and how much is being used at that moment.
Index: adaptivity.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v
retrieving revision 1.17
retrieving revision 1.18
diff -C2 -r1.17 -r1.18
*** adaptivity.c 28 Apr 2002 20:51:35 -0000 1.17
--- adaptivity.c 15 May 2002 18:05:36 -0000 1.18
***************
*** 2,6 ****
* linux/mm/comp_cache/adaptivity.c
*
! * Time-stamp: <2002-04-03 12:33:28 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/adaptivity.c
*
! * Time-stamp: <2002-05-15 10:31:44 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 24,28 ****
int latest_uncomp_misses[10], latest_miss;
- #define comp_cache_used_space ((real_num_comp_pages * PAGE_SIZE) - comp_cache_free_space)
#define comp_cache_total_space (preset_comp_cache[i].size * PAGE_SIZE)
--- 24,27 ----
***************
*** 62,66 ****
printk("nr_uncomp_misses %d\n", nr_uncomp_misses);
! printk("free space %ld\n", (comp_cache_free_space * 100)/(real_num_comp_pages * PAGE_SIZE));
/* compute costs and benefits - smaller sizes*/
--- 61,65 ----
printk("nr_uncomp_misses %d\n", nr_uncomp_misses);
! printk("free space %ld\n", (comp_cache_free_space * 100)/(num_comp_pages * PAGE_SIZE));
/* compute costs and benefits - smaller sizes*/
***************
*** 103,107 ****
}
! if (comp_cache_free_space > 0.30 * real_num_comp_pages * PAGE_SIZE)
goto out;
--- 102,106 ----
}
! if (comp_cache_free_space > 0.30 * num_comp_pages * PAGE_SIZE)
goto out;
***************
*** 171,175 ****
unsigned int i, new_fragment_hash_bits, new_fragment_hash_order, hash_index;
! new_fragment_hash_size = 3 * real_num_comp_pages * sizeof(comp_cache_fragment_t *);
new_fragment_hash = create_fragment_hash(&new_fragment_hash_size, &new_fragment_hash_bits, &new_fragment_hash_order);
--- 170,174 ----
unsigned int i, new_fragment_hash_bits, new_fragment_hash_order, hash_index;
! new_fragment_hash_size = 3 * num_comp_pages * sizeof(comp_cache_fragment_t *);
new_fragment_hash = create_fragment_hash(&new_fragment_hash_size, &new_fragment_hash_bits, &new_fragment_hash_order);
***************
*** 181,185 ****
/* if we are growing the hash, but couldn't allocate a bigger
* chunk, let's back out and keep the current one */
! if (3 * real_num_comp_pages > fragment_hash_size &&
new_fragment_hash_order <= fragment_hash_order)
goto free_new_hash;
--- 180,184 ----
/* if we are growing the hash, but couldn't allocate a bigger
* chunk, let's back out and keep the current one */
! if (3 * num_comp_pages > fragment_hash_size &&
new_fragment_hash_order <= fragment_hash_order)
goto free_new_hash;
***************
*** 225,229 ****
* shrink_vswap(unsigned long) - shrinks vswap adressing table from
* its current size (vswap_current_num_entries) to NUM_VSWAP_ENTRIES,
! * its new size in function of real_num_comp_pages.
*
* we try to shrink the vswap at once, but that will depend on getting
--- 224,228 ----
* shrink_vswap(unsigned long) - shrinks vswap adressing table from
* its current size (vswap_current_num_entries) to NUM_VSWAP_ENTRIES,
! * its new size in function of num_comp_pages.
*
* we try to shrink the vswap at once, but that will depend on getting
***************
*** 425,429 ****
/* grow_vswap(void) - grows vswap adressing table from its current
* size (vswap_current_num_entries) to NUM_VSWAP_ENTRIES, its new size
! * in function of real_num_comp_pages.
*
* we allocate the new vswap table and allocates the needed vswap
--- 424,428 ----
/* grow_vswap(void) - grows vswap adressing table from its current
* size (vswap_current_num_entries) to NUM_VSWAP_ENTRIES, its new size
! * in function of num_comp_pages.
*
* we allocate the new vswap table and allocates the needed vswap
***************
*** 495,499 ****
static inline int
fragment_hash_needs_to_shrink(void) {
! unsigned long new_fragment_hash_size = (3 * real_num_comp_pages) * sizeof(comp_cache_fragment_t *);
/* if we shrink the hash table an order, will the data fit in
--- 494,498 ----
static inline int
fragment_hash_needs_to_shrink(void) {
! unsigned long new_fragment_hash_size = (3 * num_comp_pages) * sizeof(comp_cache_fragment_t *);
/* if we shrink the hash table an order, will the data fit in
***************
*** 526,530 ****
comp_cache_needs_to_shrink(void) {
/* obvious condition */
! if (new_num_comp_pages >= real_num_comp_pages)
return 0;
--- 525,529 ----
comp_cache_needs_to_shrink(void) {
/* obvious condition */
! if (new_num_comp_pages >= num_comp_pages)
return 0;
***************
*** 565,570 ****
comp_cache_freeable_space -= PAGE_SIZE;
comp_cache_free_space -= PAGE_SIZE;
! real_num_comp_pages--;
! //printk("shrink new %lu real %lu\n", new_num_comp_pages, real_num_comp_pages);
out:
--- 564,569 ----
comp_cache_freeable_space -= PAGE_SIZE;
comp_cache_free_space -= PAGE_SIZE;
! num_comp_pages--;
! //printk("shrink new %lu real %lu\n", new_num_comp_pages, num_comp_pages);
out:
***************
*** 598,606 ****
}
! #define comp_cache_needs_to_grow() (new_num_comp_pages > real_num_comp_pages)
static inline int
fragment_hash_needs_to_grow(void) {
! unsigned long new_fragment_hash_size = (3 * real_num_comp_pages) * sizeof(comp_cache_fragment_t *);
/* do we really need a bigger hash table? */
--- 597,605 ----
}
! #define comp_cache_needs_to_grow() (new_num_comp_pages > num_comp_pages)
static inline int
fragment_hash_needs_to_grow(void) {
! unsigned long new_fragment_hash_size = (3 * num_comp_pages) * sizeof(comp_cache_fragment_t *);
/* do we really need a bigger hash table? */
***************
*** 644,649 ****
comp_cache_freeable_space += PAGE_SIZE;
comp_cache_free_space += PAGE_SIZE;
! real_num_comp_pages++;
! //printk("grow real %lu\n", real_num_comp_pages);
}
--- 643,648 ----
comp_cache_freeable_space += PAGE_SIZE;
comp_cache_free_space += PAGE_SIZE;
! num_comp_pages++;
! //printk("grow real %lu\n", num_comp_pages);
}
Index: aux.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v
retrieving revision 1.24
retrieving revision 1.25
diff -C2 -r1.24 -r1.25
*** aux.c 28 Apr 2002 20:51:35 -0000 1.24
--- aux.c 15 May 2002 18:05:36 -0000 1.25
***************
*** 2,6 ****
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-04-18 13:00:30 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-05-15 12:31:27 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 395,399 ****
/* fragment hash table (code heavily based on
* page_cache_init():filemap.c */
! fragment_hash_size = 3 * real_num_comp_pages * sizeof(comp_cache_fragment_t *);
fragment_hash_used = 0;
fragment_hash = create_fragment_hash(&fragment_hash_size, &fragment_hash_bits, &fragment_hash_order);
--- 395,399 ----
/* fragment hash table (code heavily based on
* page_cache_init():filemap.c */
! fragment_hash_size = 3 * num_comp_pages * sizeof(comp_cache_fragment_t *);
fragment_hash_used = 0;
fragment_hash = create_fragment_hash(&fragment_hash_size, &fragment_hash_bits, &fragment_hash_order);
Index: free.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v
retrieving revision 1.28
retrieving revision 1.29
diff -C2 -r1.28 -r1.29
*** free.c 8 May 2002 20:24:38 -0000 1.28
--- free.c 15 May 2002 18:05:36 -0000 1.29
***************
*** 2,6 ****
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2002-05-08 16:01:46 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2002-05-15 10:05:47 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 71,75 ****
list_del_init(&fragment->mapping_list);
fragment->mapping->nrpages--;
! atomic_dec(&page_cache_size);
comp_cache_free_space += fragment->compressed_size;
}
--- 71,76 ----
list_del_init(&fragment->mapping_list);
fragment->mapping->nrpages--;
! if (PageSwapCache(fragment))
! num_swapper_fragments--;
comp_cache_free_space += fragment->compressed_size;
}
Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.35
retrieving revision 1.36
diff -C2 -r1.35 -r1.36
*** main.c 8 May 2002 20:24:39 -0000 1.35
--- main.c 15 May 2002 18:05:36 -0000 1.36
***************
*** 2,6 ****
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-05-08 16:41:47 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-05-15 10:06:22 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 20,24 ****
/* compressed cache */
! unsigned long real_num_comp_pages = 0;
unsigned long init_num_comp_pages = 0;
--- 20,26 ----
/* compressed cache */
! unsigned long num_comp_pages = 0;
! unsigned long num_swapper_fragments = 0;
!
unsigned long init_num_comp_pages = 0;
***************
*** 162,166 ****
#endif
init_num_comp_pages = num_physpages * 0.10;
! new_num_comp_pages = min_num_comp_pages = real_num_comp_pages = init_num_comp_pages;
printk("Compressed Cache: starting %s version\n", COMP_CACHE_VERSION);
--- 164,168 ----
#endif
init_num_comp_pages = num_physpages * 0.10;
! new_num_comp_pages = min_num_comp_pages = num_comp_pages = init_num_comp_pages;
printk("Compressed Cache: starting %s version\n", COMP_CACHE_VERSION);
***************
*** 203,211 ****
/* initialize each comp cache entry */
! for (i = 0; i < real_num_comp_pages; i++) {
page = alloc_page(GFP_KERNEL);
init_comp_page(&comp_page, page);
}
! comp_cache_free_space = real_num_comp_pages * PAGE_SIZE;
/* initialize our algorithms statistics array */
--- 205,213 ----
/* initialize each comp cache entry */
! for (i = 0; i < num_comp_pages; i++) {
page = alloc_page(GFP_KERNEL);
init_comp_page(&comp_page, page);
}
! comp_cache_free_space = num_comp_pages * PAGE_SIZE;
/* initialize our algorithms statistics array */
Index: proc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/proc.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -C2 -r1.7 -r1.8
*** proc.c 28 Apr 2002 20:51:35 -0000 1.7
--- proc.c 15 May 2002 18:05:36 -0000 1.8
***************
*** 2,6 ****
* linux/mm/comp_cache/proc.c
*
! * Time-stamp: <2002-04-18 15:32:34 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/proc.c
*
! * Time-stamp: <2002-05-15 10:40:40 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 243,246 ****
--- 243,249 ----
total_faultin_pages = stats->faultin_swap + stats->faultin_page;
+ if (!total_comp_pages)
+ return;
+
*length += sprintf(page + *length, "compressed cache - statistics\n");
*length += sprintf(page + *length, "algorithm %s%s\n", algorithm->name, current_msg);
***************
*** 271,277 ****
stats->faultin_swap,
stats->faultin_page);
-
- if (!total_comp_pages)
- return;
mean_size = big_division(stats->comp_size_sum, total_comp_pages);
--- 274,277 ----
Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.41
retrieving revision 1.42
diff -C2 -r1.41 -r1.42
*** swapout.c 14 May 2002 18:59:28 -0000 1.41
--- swapout.c 15 May 2002 18:05:36 -0000 1.42
***************
*** 2,6 ****
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-05-14 14:46:54 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-05-15 10:07:24 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 299,303 ****
BUG();
! maxtry = real_num_comp_pages >> 1;
page_cache_get(page);
--- 299,303 ----
BUG();
! maxtry = num_comp_pages >> 1;
page_cache_get(page);
***************
*** 305,309 ****
while (maxtry--) {
aux_comp_size = compressed_size;
! maxscan = real_num_comp_pages >> 3;
while (maxscan--) {
--- 305,309 ----
while (maxtry--) {
aux_comp_size = compressed_size;
! maxscan = num_comp_pages >> 3;
while (maxscan--) {
***************
*** 469,473 ****
list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages);
page->mapping->nrpages++;
! atomic_inc(&page_cache_size);
add_fragment_to_hash_table(fragment);
--- 469,478 ----
list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages);
page->mapping->nrpages++;
!
! /* we need to account the number of fragments that are from
! * swapper_space to correctly count the cached, swapcached
! * memory in /proc/meminfo */
! if (PageSwapCache(page))
! num_swapper_fragments++;
add_fragment_to_hash_table(fragment);
Index: vswap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v
retrieving revision 1.29
retrieving revision 1.30
diff -C2 -r1.29 -r1.30
*** vswap.c 26 Mar 2002 12:35:10 -0000 1.29
--- vswap.c 15 May 2002 18:05:36 -0000 1.30
***************
*** 2,6 ****
* linux/mm/comp_cache/vswap.c
*
! * Time-stamp: <2002-03-26 09:22:24 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/vswap.c
*
! * Time-stamp: <2002-05-15 09:36:47 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 45,48 ****
--- 45,51 ----
unsigned long vswap_num_used_entries;
+ /* number of vswap entries that have swap cache pages */
+ unsigned long vswap_num_swap_cache;
+
/* index to the last used vswap entry - used when shrinking vswap */
unsigned int vswap_last_used;
***************
*** 77,81 ****
* compressed cache, even if we have to move fragments in
* order to make room for any vswap entry */
! if (vswap_num_reserved_entries > real_num_comp_pages)
return 0;
--- 80,84 ----
* compressed cache, even if we have to move fragments in
* order to make room for any vswap entry */
! if (vswap_num_reserved_entries > num_comp_pages)
return 0;
***************
*** 85,89 ****
return 0;
! available_mean_size = (unsigned short) (comp_cache_freeable_space/real_num_comp_pages);
if (available_mean_size > PAGE_SIZE)
--- 88,92 ----
return 0;
! available_mean_size = (unsigned short) (comp_cache_freeable_space/num_comp_pages);
if (available_mean_size > PAGE_SIZE)
***************
*** 587,590 ****
--- 590,594 ----
vswap_address[offset]->swap_cache_page = page;
+ vswap_num_swap_cache++;
}
***************
*** 618,621 ****
--- 622,626 ----
vswap_address[offset]->swap_cache_page = NULL;
+ vswap_num_swap_cache--;
}
***************
*** 665,673 ****
vswap_last_used = NUM_VSWAP_ENTRIES - 1;
vswap_num_used_entries = 0;
for (i = 0; i < NUM_VSWAP_ENTRIES; i++)
vswap_alloc_and_init(vswap_address, i);
! comp_cache_freeable_space = PAGE_SIZE * real_num_comp_pages;
last_page_size = (unsigned short *) vmalloc(NUM_MEAN_PAGES * sizeof(unsigned short));
--- 670,679 ----
vswap_last_used = NUM_VSWAP_ENTRIES - 1;
vswap_num_used_entries = 0;
+ vswap_num_swap_cache = 0;
for (i = 0; i < NUM_VSWAP_ENTRIES; i++)
vswap_alloc_and_init(vswap_address, i);
! comp_cache_freeable_space = PAGE_SIZE * num_comp_pages;
last_page_size = (unsigned short *) vmalloc(NUM_MEAN_PAGES * sizeof(unsigned short));
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-15 18:05:38
|
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv11239/include/linux
Modified Files:
comp_cache.h
Log Message:
- Fix meminfo output when compressed cache is enabled. Thus, cached memory
and swap cached memory actually correspond to the cache and swap cached
memory in the non-compressed cache. Besides, it adds two lines to the output
with information about compressed cache memory consumption, like how much
memory was reserved and how much is being used at that moment.
Index: comp_cache.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v
retrieving revision 1.65
retrieving revision 1.66
diff -C2 -r1.65 -r1.66
*** comp_cache.h 2 May 2002 16:31:37 -0000 1.65
--- comp_cache.h 15 May 2002 18:05:35 -0000 1.66
***************
*** 2,6 ****
* linux/mm/comp_cache.h
*
! * Time-stamp: <2002-04-30 10:47:23 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache.h
*
! * Time-stamp: <2002-05-15 10:32:02 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 34,40 ****
#define MAX_COMPRESSED_SIZE 4500
! #define NUM_VSWAP_ENTRIES (3 * real_num_comp_pages)
! extern unsigned long real_num_comp_pages, new_num_comp_pages, max_num_comp_pages;
struct pte_list {
--- 34,40 ----
#define MAX_COMPRESSED_SIZE 4500
! #define NUM_VSWAP_ENTRIES (3 * num_comp_pages)
! extern unsigned long num_comp_pages, num_swapper_fragments, new_num_comp_pages, max_num_comp_pages;
struct pte_list {
***************
*** 344,347 ****
--- 344,349 ----
extern unsigned long comp_cache_free_space;
+ #define comp_cache_used_space ((num_comp_pages * PAGE_SIZE) - comp_cache_free_space)
+
#define add_swap_miss() (nr_swap_misses++)
#define add_compressed_cache_miss() (nr_compressed_cache_misses++)
***************
*** 384,387 ****
--- 386,390 ----
extern unsigned long vswap_current_num_entries;
extern unsigned long vswap_num_used_entries;
+ extern unsigned long vswap_num_swap_cache;
extern unsigned int vswap_last_used;
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-15 18:05:38
|
Update of /cvsroot/linuxcompressed/linux/fs/proc In directory usw-pr-cvs1:/tmp/cvs-serv11239/fs/proc Modified Files: proc_misc.c Log Message: - Fix meminfo output when compressed cache is enabled. Thus, cached memory and swap cached memory actually corresponds to the cache and swap cached memory in non-compressed cache. Besides, adds two lines to its output with information about compressed cache memory consumption like how much memory was reserved and how much is being used at that moment. Index: proc_misc.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/fs/proc/proc_misc.c,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -r1.3 -r1.4 *** proc_misc.c 13 Dec 2001 19:12:57 -0000 1.3 --- proc_misc.c 15 May 2002 18:05:35 -0000 1.4 *************** *** 164,167 **** --- 164,171 ---- "Buffers: %8lu kB\n" "Cached: %8lu kB\n" + #ifdef CONFIG_COMP_CACHE + "CCacheAlloc: %8lu kB\n" + "CCacheUsed: %8lu kB\n" + #endif "SwapCached: %8lu kB\n" "Active: %8u kB\n" *************** *** 177,182 **** --- 181,193 ---- K(i.sharedram), K(i.bufferram), + #ifdef CONFIG_COMP_CACHE + K(pg_size + num_swapper_fragments - swapper_space.nrpages), + K(num_comp_pages), + comp_cache_used_space/1024, + K(swapper_space.nrpages - num_swapper_fragments), + #else K(pg_size - swapper_space.nrpages), K(swapper_space.nrpages), + #endif K(nr_active_pages), K(nr_inactive_pages), |
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-14 18:59:32
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv18902/mm/comp_cache
Modified Files:
swapout.c
Log Message:
- One more try to fix the hang noticed by Paolo Ciarrocchi when running
mmap001. First of all, we were calling try_to_free_buffers() without the page
lock. Secondly, the swap buffer was not removed from the swap buffer list
before try_to_free_buffers(), and since this function may sleep, we could
have two code paths calling try_to_free_buffers() at the same time, which
would corrupt the structures.
To fix these bugs, we now hold the page lock when handling swap buffer pages
and also remove the swap buffer from any list right before calling
try_to_free_buffers().
Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.40
retrieving revision 1.41
diff -C2 -r1.40 -r1.41
*** swapout.c 9 May 2002 12:31:01 -0000 1.40
--- swapout.c 14 May 2002 18:59:28 -0000 1.41
***************
*** 2,6 ****
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-05-09 09:20:50 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-05-14 14:46:54 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 53,61 ****
swp_buffer = list_entry(swp_buffer_lh, struct swp_buffer, list);
! if (PageLocked(swp_buffer->page)) {
if (!wait)
continue;
list_del_init(swp_buffer_lh);
! wait_on_page(swp_buffer->page);
}
--- 53,61 ----
swp_buffer = list_entry(swp_buffer_lh, struct swp_buffer, list);
! if (TryLockPage(swp_buffer->page)) {
if (!wait)
continue;
list_del_init(swp_buffer_lh);
! lock_page(swp_buffer->page);
}
***************
*** 66,72 ****
if (swp_buffer->page->buffers) {
if (!try_to_free_buffers(swp_buffer->page, gfp_mask)) {
- list_del(swp_buffer_lh);
list_add_tail(swp_buffer_lh, &swp_used_buffer_head);
continue;
}
--- 66,73 ----
if (swp_buffer->page->buffers) {
+ list_del_init(swp_buffer_lh);
if (!try_to_free_buffers(swp_buffer->page, gfp_mask)) {
list_add_tail(swp_buffer_lh, &swp_used_buffer_head);
+ UnlockPage(swp_buffer->page);
continue;
}
***************
*** 101,106 ****
--- 102,110 ----
if (!CompFragmentIO(fragment) || CompFragmentFreed(fragment)) {
CompFragmentClearIO(fragment);
+ UnlockPage(swp_buffer->page);
return NULL;
}
+
+ UnlockPage(swp_buffer->page);
if (wait)
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-09 12:31:04
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv26703/mm/comp_cache
Modified Files:
swapout.c
Log Message:
- Removed pending comp_cache_skip* in fs/buffer.c. The cvs code now compiles.
- Hopefully fixed a bug hit by Paolo Ciarrocchi which would hang his
computer when running mmap001.
Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.39
retrieving revision 1.40
diff -C2 -r1.39 -r1.40
*** swapout.c 8 May 2002 20:24:40 -0000 1.39
--- swapout.c 9 May 2002 12:31:01 -0000 1.40
***************
*** 2,6 ****
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-05-08 16:01:38 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-05-09 09:20:50 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 60,67 ****
--- 60,71 ----
}
+ /* its fragment was added to locked_pages list below,
+ * right before being returned to the caller, so let's
+ * remove it now from any mapping->*_pages list */
list_del(&swp_buffer->page->list);
if (swp_buffer->page->buffers) {
if (!try_to_free_buffers(swp_buffer->page, gfp_mask)) {
+ list_del(swp_buffer_lh);
list_add_tail(swp_buffer_lh, &swp_used_buffer_head);
continue;
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-09 12:31:04
|
Update of /cvsroot/linuxcompressed/linux/fs
In directory usw-pr-cvs1:/tmp/cvs-serv26703/fs
Modified Files:
buffer.c
Log Message:
- Removed pending comp_cache_skip* in fs/buffer.c. The cvs code now compiles.
- Hopefully fixed a bug hit by Paolo Ciarrocchi which would hang his
computer when running mmap001.
Index: buffer.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/fs/buffer.c,v
retrieving revision 1.14
retrieving revision 1.15
diff -C2 -r1.14 -r1.15
*** buffer.c 2 May 2002 16:31:36 -0000 1.14
--- buffer.c 9 May 2002 12:31:01 -0000 1.15
***************
*** 2696,2701 ****
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
- if (comp_cache_skip_buffer_freeing())
- return 0;
gfp_mask = pf_gfp_mask(gfp_mask);
if (gfp_mask & __GFP_IO) {
--- 2696,2699 ----
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-08 20:24:49
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv17200/mm/comp_cache Modified Files: free.c main.c swapout.c Log Message: Some bug fixes: - Cached memory accounting fix Details: http://sourceforge.net/tracker/index.php?func=detail&aid=553794&group_id=13472&atid=113472 - Remove all comp_cache_skip* functions. At least temporarily, there were removed because I am not sure what are the consequences of skipping doing some stuff. For example, skip writing dirty buffers turns out to be a horrible choice for dbench, which gets a terrible performance. - Fixes a potential oops in compressed_pages() to wrong return value from get_comp_cache_page(). Details: http://sourceforge.net/tracker/index.php?func=detail&aid=553878&group_id=13472&atid=113472 - Makes compressed cache work correctly if support for page cache is disabled. Details: http://sourceforge.net/tracker/index.php?func=detail&aid=553408&group_id=13472&atid=113472 - Fixes a kernel BUG() at vmscan.c:366 (active page on inactive list) Details: http://sourceforge.net/tracker/index.php?func=detail&aid=551996&group_id=13472&atid=113472 Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.27 retrieving revision 1.28 diff -C2 -r1.27 -r1.28 *** free.c 28 Apr 2002 20:51:35 -0000 1.27 --- free.c 8 May 2002 20:24:38 -0000 1.28 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-04-02 09:31:32 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! 
* Time-stamp: <2002-05-08 16:01:46 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 71,74 **** --- 71,75 ---- list_del_init(&fragment->mapping_list); fragment->mapping->nrpages--; + atomic_dec(&page_cache_size); comp_cache_free_space += fragment->compressed_size; } Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.34 retrieving revision 1.35 diff -C2 -r1.34 -r1.35 *** main.c 28 Apr 2002 20:51:35 -0000 1.34 --- main.c 8 May 2002 20:24:39 -0000 1.35 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-04-28 16:51:46 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-05-08 16:41:47 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 35,76 **** int nr_compressed_cache_misses; - static unsigned int skip_buffer = 0, skip_slab = 0, skip_dicache = 0; - extern unsigned long num_physpages; extern comp_cache_t * get_comp_cache_page(struct page *, unsigned short, comp_cache_fragment_t **, int, unsigned int); - - /*** - * Skips a number of dirty buffer writes when we have much memory - * pressure, since we want the system to put much more pressure on - * pages from page and swap cache in order to have these kind of pages - * compressed. - * - * TODO: make the number of skips dependent on relative compressed - * cache size. - */ - inline int - comp_cache_skip_buffer_freeing(void) - { - return (skip_buffer = ++skip_buffer % 500); - } - - /*** - * Same as above, but skips shrinking slab cache (kmem_cache_reap()). - */ - inline int - comp_cache_skip_slab_shrunk(void) - { - return (skip_slab = ++skip_slab % 500); - } - - /*** - * Same as above, but skips shrinking dcache and icache. 
- */ - inline int - comp_cache_skip_dicache_shrunk(void) - { - return (skip_dicache = ++skip_dicache % 500); - } inline int --- 35,41 ---- Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.38 retrieving revision 1.39 diff -C2 -r1.38 -r1.39 *** swapout.c 28 Apr 2002 20:51:35 -0000 1.38 --- swapout.c 8 May 2002 20:24:40 -0000 1.39 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-04-28 17:21:40 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-05-08 16:01:38 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 461,464 **** --- 461,465 ---- list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages); page->mapping->nrpages++; + atomic_inc(&page_cache_size); add_fragment_to_hash_table(fragment); *************** *** 480,483 **** --- 481,486 ---- add_comp_page_to_hash_table(comp_page); UnlockPage(comp_page->page); + comp_page = NULL; + if (!dirty) goto out_unlock; |
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-08 20:24:47
|
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv17200/mm Modified Files: filemap.c vmscan.c Log Message: Some bug fixes: - Cached memory accounting fix Details: http://sourceforge.net/tracker/index.php?func=detail&aid=553794&group_id=13472&atid=113472 - Remove all comp_cache_skip* functions. At least temporarily, there were removed because I am not sure what are the consequences of skipping doing some stuff. For example, skip writing dirty buffers turns out to be a horrible choice for dbench, which gets a terrible performance. - Fixes a potential oops in compressed_pages() to wrong return value from get_comp_cache_page(). Details: http://sourceforge.net/tracker/index.php?func=detail&aid=553878&group_id=13472&atid=113472 - Makes compressed cache work correctly if support for page cache is disabled. Details: http://sourceforge.net/tracker/index.php?func=detail&aid=553408&group_id=13472&atid=113472 - Fixes a kernel BUG() at vmscan.c:366 (active page on inactive list) Details: http://sourceforge.net/tracker/index.php?func=detail&aid=551996&group_id=13472&atid=113472 Index: filemap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v retrieving revision 1.24 retrieving revision 1.25 diff -C2 -r1.24 -r1.25 *** filemap.c 28 Apr 2002 20:51:34 -0000 1.24 --- filemap.c 8 May 2002 20:24:34 -0000 1.25 *************** *** 162,166 **** if (mapping->host) mark_inode_dirty_pages(mapping->host); ! #ifdef CONFIG_COMP_PAGE_CACHE if (PageTestandClearCompCache(page)) invalidate_comp_cache(mapping, page->index); --- 162,166 ---- if (mapping->host) mark_inode_dirty_pages(mapping->host); ! 
#ifdef CONFIG_COMP_CACHE if (PageTestandClearCompCache(page)) invalidate_comp_cache(mapping, page->index); Index: vmscan.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v retrieving revision 1.32 retrieving revision 1.33 diff -C2 -r1.32 -r1.33 *** vmscan.c 2 May 2002 16:31:37 -0000 1.32 --- vmscan.c 8 May 2002 20:24:36 -0000 1.33 *************** *** 511,520 **** /* compress it if it's a clean page that has not been * compressed in a previous iteration */ ! if (compress_clean_page(page, gfp_mask)) { ! list_del(entry); ! list_add_tail(entry, &inactive_list); ! max_scan++; continue; - } /* point of no return */ --- 511,516 ---- /* compress it if it's a clean page that has not been * compressed in a previous iteration */ ! if (compress_clean_page(page, gfp_mask)) continue; /* point of no return */ *************** *** 584,597 **** unsigned long ratio; - /* if compressed cache is enable, we should want to have much - * more pressure on swap/page cache than on other caches */ - if (comp_cache_skip_slab_shrunk()) - goto skip_slab_cache; - nr_pages -= kmem_cache_reap(gfp_mask); if (nr_pages <= 0) return 0; - skip_slab_cache: nr_pages = chunk_size; /* try to keep the active list 2/3 of the size of the cache */ --- 580,587 ---- *************** *** 603,609 **** return 0; - if (comp_cache_skip_dicache_shrunk()) - return nr_pages; - shrink_dcache_memory(priority, gfp_mask); shrink_icache_memory(priority, gfp_mask); --- 593,596 ---- |
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-02 16:31:40
|
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv5957/include/linux Modified Files: comp_cache.h Log Message: - Fixed compilation error in buffer.c when compiling it without compressed cache. - Fixed oops caused when running fillmem due to a null pointer - Fixed major/minor faults for compressed cache. Now we have correct data about when we actually had to perform IO to service a page fault. - Improved support for clean pages. Now the clean page is compressed and right away freed in shrink_cache(). - When a pte in do_no_page() has write access to the page, it's already set as dirty. So, in the same way done in do_swap_page(), we already remove the its fragment from compressed cache since it will need to be compressed again anyway. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.64 retrieving revision 1.65 diff -C2 -r1.64 -r1.65 *** comp_cache.h 28 Apr 2002 20:51:33 -0000 1.64 --- comp_cache.h 2 May 2002 16:31:37 -0000 1.65 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-04-22 14:55:16 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-04-30 10:47:23 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 29,33 **** #include <linux/WKcommon.h> ! #define COMP_CACHE_VERSION "0.23pre2" /* maximum compressed size of a page */ --- 29,33 ---- #include <linux/WKcommon.h> ! #define COMP_CACHE_VERSION "0.23pre3" /* maximum compressed size of a page */ |
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-02 16:31:40
|
Update of /cvsroot/linuxcompressed/linux/fs
In directory usw-pr-cvs1:/tmp/cvs-serv5957/fs
Modified Files:
buffer.c
Log Message:
- Fixed compilation error in buffer.c when compiling it without compressed
cache.
- Fixed oops caused when running fillmem due to a null pointer
- Fixed major/minor faults for compressed cache. Now we have correct data
about when we actually had to perform IO to service a page fault.
- Improved support for clean pages. Now the clean page is compressed and
right away freed in shrink_cache().
- When a pte in do_no_page() has write access to the page, it's already set
as dirty. So, in the same way as done in do_swap_page(), we already remove
its fragment from the compressed cache since it will need to be compressed
again anyway.
Index: buffer.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/fs/buffer.c,v
retrieving revision 1.13
retrieving revision 1.14
diff -C2 -r1.13 -r1.14
*** buffer.c 28 Apr 2002 20:51:33 -0000 1.13
--- buffer.c 2 May 2002 16:31:36 -0000 1.14
***************
*** 48,51 ****
--- 48,52 ----
#include <linux/module.h>
#include <linux/completion.h>
+ #include <linux/comp_cache.h>
#include <asm/uaccess.h>
***************
*** 2695,2701 ****
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
gfp_mask = pf_gfp_mask(gfp_mask);
if (gfp_mask & __GFP_IO) {
! if (((gfp_mask & __GFP_HIGHIO) || !PageHighMem(page)) && !comp_cache_skip_buffer_freeing()) {
if (sync_page_buffers(bh)) {
/* no IO or waiting next time */
--- 2696,2704 ----
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
+ if (comp_cache_skip_buffer_freeing())
+ return 0;
gfp_mask = pf_gfp_mask(gfp_mask);
if (gfp_mask & __GFP_IO) {
! if ((gfp_mask & __GFP_HIGHIO) || !PageHighMem(page)) {
if (sync_page_buffers(bh)) {
/* no IO or waiting next time */
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-02 16:31:40
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv5957/mm/comp_cache Modified Files: swapin.c Log Message: - Fixed compilation error in buffer.c when compiling it without compressed cache. - Fixed oops caused when running fillmem due to a null pointer - Fixed major/minor faults for compressed cache. Now we have correct data about when we actually had to perform IO to service a page fault. - Improved support for clean pages. Now the clean page is compressed and right away freed in shrink_cache(). - When a pte in do_no_page() has write access to the page, it's already set as dirty. So, in the same way done in do_swap_page(), we already remove the its fragment from compressed cache since it will need to be compressed again anyway. Index: swapin.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v retrieving revision 1.33 retrieving revision 1.34 diff -C2 -r1.33 -r1.34 *** swapin.c 28 Apr 2002 20:51:35 -0000 1.33 --- swapin.c 2 May 2002 16:31:37 -0000 1.34 *************** *** 2,6 **** * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-04-28 17:19:57 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-04-30 11:36:19 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 41,46 **** if (likely(!PageTestandClearCompCache(page))) goto out; ! ! err = find_comp_page(page->mapping, page->index, &fragment); if (err) --- 41,49 ---- if (likely(!PageTestandClearCompCache(page))) goto out; ! ! /* we may have a null page->mapping if the page have been ! * removed from swap cache before flushing its compressed ! * cache entry, what may happen in do_swap_page() */ ! err = find_comp_page(page->mapping?:&swapper_space, page->index, &fragment); if (err) |
|
From: Rodrigo S. de C. <rc...@us...> - 2002-05-02 16:31:40
|
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv5957/mm
Modified Files:
memory.c vmscan.c
Log Message:
- Fixed compilation error in buffer.c when compiling it without compressed
cache.
- Fixed oops caused when running fillmem due to a null pointer
- Fixed major/minor faults for compressed cache. Now we have correct data
about when we actually had to perform IO to service a page fault.
- Improved support for clean pages. Now the clean page is compressed and
right away freed in shrink_cache().
- When a pte in do_no_page() has write access to the page, it's already set
as dirty. So, in the same way done in do_swap_page(), we already remove
its fragment from compressed cache since it will need to be compressed again
anyway.
Index: memory.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v
retrieving revision 1.27
retrieving revision 1.28
diff -C2 -r1.27 -r1.28
*** memory.c 28 Apr 2002 20:51:34 -0000 1.27
--- memory.c 2 May 2002 16:31:37 -0000 1.28
***************
*** 1135,1141 ****
if (!page) {
! comp_cache_fragment_t * fragment;
! if (find_comp_page(&swapper_space, entry.val, &fragment))
swapin_readahead(entry);
page = read_swap_cache_async(entry);
if (!page) {
--- 1135,1145 ----
if (!page) {
! comp_cache_fragment_t * fragment;
! /* perform readahead only if the page is on disk */
! if (find_comp_page(&swapper_space, entry.val, &fragment)) {
swapin_readahead(entry);
+ /* major fault */
+ ret = 2;
+ }
page = read_swap_cache_async(entry);
if (!page) {
***************
*** 1150,1156 ****
return retval;
}
-
- /* Had to read the page from swap area: Major fault */
- ret = 2;
}
--- 1154,1157 ----
***************
*** 1262,1265 ****
--- 1263,1267 ----
struct page * new_page;
pte_t entry;
+ int ret = 2; /* usually major fault */
if (!vma->vm_ops || !vma->vm_ops->nopage)
***************
*** 1306,1311 ****
flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot);
! if (write_access)
entry = pte_mkwrite(pte_mkdirty(entry));
set_pte(page_table, entry);
} else {
--- 1308,1317 ----
flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot);
! if (PageCompCache(new_page))
! ret = 1;
! if (write_access) {
entry = pte_mkwrite(pte_mkdirty(entry));
+ flush_comp_cache(new_page);
+ }
set_pte(page_table, entry);
} else {
***************
*** 1319,1323 ****
update_mmu_cache(vma, address, entry);
spin_unlock(&mm->page_table_lock);
! return 2; /* Major fault */
}
--- 1325,1329 ----
update_mmu_cache(vma, address, entry);
spin_unlock(&mm->page_table_lock);
! return ret;
}
Index: vmscan.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v
retrieving revision 1.31
retrieving revision 1.32
diff -C2 -r1.31 -r1.32
*** vmscan.c 28 Apr 2002 20:51:34 -0000 1.31
--- vmscan.c 2 May 2002 16:31:37 -0000 1.32
***************
*** 511,516 ****
/* compress it if it's a clean page that has not been
* compressed in a previous iteration */
! if (compress_clean_page(page, gfp_mask))
continue;
/* point of no return */
--- 511,520 ----
/* compress it if it's a clean page that has not been
* compressed in a previous iteration */
! if (compress_clean_page(page, gfp_mask)) {
! list_del(entry);
! list_add_tail(entry, &inactive_list);
! max_scan++;
continue;
+ }
/* point of no return */
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-04-28 20:52:06
|
Update of /cvsroot/linuxcompressed/linux/arch/i386 In directory usw-pr-cvs1:/tmp/cvs-serv2510/arch/i386 Modified Files: config.in Log Message: This version features a first non-functional version of compressed cache automatic automatic adaptivity to system behaviour. It also has many changes aiming to fix the performance drop we have in linux kernel compilation test (check statistics for 0.23pre1 on our web site). Our analysis isn't complete and more changes are likely to go since a huge percentage of CPU is still not being used. Anyway, the current changes improve compressed cache a lot, mainly compressed cache support for page cache, and it already works much better in that scenario. Some detailed changes: - Configuration options changes. Now we only make compressed cache option available if SMP is turned off. Page cache support is an option, that is disabled by default. There's also an option to enable adaptivity, which is currently non-functional. - There's no option in kernel configuration to select initial compressed cache size any longer. It can be selected only by kernel parameter. This parameter won't be available when adaptivity option is enabled (since the system will configure compressed cache automatically). In this case, initial compressed cache size is 10% of total memory size. - Functions cleanup: all algorithms functions and related stuff are now in proc.c file; statistics functions were rewritten and are simpler. - New statistics are collected by the system, like a per-cache analysis (swap and page cache). Statistics is much more complete and nicer. - Now there are functions that force the VM to skip writing dirty buffer, shrinking slab cache, dcache and icache, since we want the system to put much more pressure on pages from page and swap cache in order to have these kind of pages compressed. - Pages are removed from compressed cache in swapin if the process has write permissions. 
Since the pte will be set dirty, the page will be surely compressed again, so why keep it in the compressed cache? - If we are swapping in and the page is not present in swap cache, we no longer read a cluster of pages from swap device if the page is in compressed cache. This conceptual bug forced us to read many pages from swap device if the page was compressed in our cache, what's wrong. The same way, that happened when a file entry was faulted in and we service this fault. Beforehand we were forcing a cluster read even if the page were present in compressed cache. Index: config.in =================================================================== RCS file: /cvsroot/linuxcompressed/linux/arch/i386/config.in,v retrieving revision 1.16 retrieving revision 1.17 diff -C2 -r1.16 -r1.17 *** config.in 26 Feb 2002 20:59:01 -0000 1.16 --- config.in 28 Apr 2002 20:51:32 -0000 1.17 *************** *** 207,213 **** comment 'General setup' ! dep_bool 'Compressed Cache' CONFIG_COMP_CACHE $CONFIG_EXPERIMENTAL ! if [ "$CONFIG_COMP_CACHE" = "y" ]; then ! int 'Initial Number of Compressed Pages' CONFIG_COMP_CACHE_SIZE 2048 fi --- 207,217 ---- comment 'General setup' ! if [ "$CONFIG_SMP" != "y" ]; then ! dep_bool 'Compressed cache (EXPERIMENTAL)' CONFIG_COMP_CACHE $CONFIG_EXPERIMENTAL ! define_bool CONFIG_COMP_ADAPTIVITY n ! if [ "$CONFIG_COMP_CACHE" = "y" ]; then ! bool ' Support for Page Cache compression' CONFIG_COMP_PAGE_CACHE ! bool ' Automatic adaptivity for compressed cache size' CONFIG_COMP_ADAPTIVITY ! fi fi |
|
From: Rodrigo S. de C. <rc...@us...> - 2002-04-28 20:52:06
|
Update of /cvsroot/linuxcompressed/linux/fs In directory usw-pr-cvs1:/tmp/cvs-serv2510/fs Added Files: buffer.c Log Message: This version features a first non-functional version of compressed cache automatic automatic adaptivity to system behaviour. It also has many changes aiming to fix the performance drop we have in linux kernel compilation test (check statistics for 0.23pre1 on our web site). Our analysis isn't complete and more changes are likely to go since a huge percentage of CPU is still not being used. Anyway, the current changes improve compressed cache a lot, mainly compressed cache support for page cache, and it already works much better in that scenario. Some detailed changes: - Configuration options changes. Now we only make compressed cache option available if SMP is turned off. Page cache support is an option, that is disabled by default. There's also an option to enable adaptivity, which is currently non-functional. - There's no option in kernel configuration to select initial compressed cache size any longer. It can be selected only by kernel parameter. This parameter won't be available when adaptivity option is enabled (since the system will configure compressed cache automatically). In this case, initial compressed cache size is 10% of total memory size. - Functions cleanup: all algorithms functions and related stuff are now in proc.c file; statistics functions were rewritten and are simpler. - New statistics are collected by the system, like a per-cache analysis (swap and page cache). Statistics is much more complete and nicer. - Now there are functions that force the VM to skip writing dirty buffer, shrinking slab cache, dcache and icache, since we want the system to put much more pressure on pages from page and swap cache in order to have these kind of pages compressed. - Pages are removed from compressed cache in swapin if the process has write permissions. 
Since the pte will be set dirty, the page will be surely compressed again, so why keep it in the compressed cache? - If we are swapping in and the page is not present in swap cache, we no longer read a cluster of pages from swap device if the page is in compressed cache. This conceptual bug forced us to read many pages from swap device if the page was compressed in our cache, what's wrong. The same way, that happened when a file entry was faulted in and we service this fault. Beforehand we were forcing a cluster read even if the page were present in compressed cache. |
|
From: Rodrigo S. de C. <rc...@us...> - 2002-04-28 20:52:06
|
Update of /cvsroot/linuxcompressed/linux/Documentation
In directory usw-pr-cvs1:/tmp/cvs-serv2510/Documentation
Modified Files:
Configure.help
Log Message:
This version features a first non-functional version of compressed
cache automatic adaptivity to system behaviour. It also has
many changes aiming to fix the performance drop we have in linux
kernel compilation test (check statistics for 0.23pre1 on our web
site). Our analysis isn't complete and more changes are likely to go
since a huge percentage of CPU is still not being used. Anyway, the
current changes improve compressed cache a lot, mainly compressed
cache support for page cache, and it already works much better in that
scenario.
Some detailed changes:
- Configuration options changes. Now we only make compressed cache
option available if SMP is turned off. Page cache support is an
option, that is disabled by default. There's also an option to enable
adaptivity, which is currently non-functional.
- There's no option in kernel configuration to select initial
compressed cache size any longer. It can be selected only by kernel
parameter. This parameter won't be available when adaptivity option is
enabled (since the system will configure compressed cache
automatically). In this case, initial compressed cache size is 10% of
total memory size.
- Functions cleanup: all algorithms functions and related stuff are
now in proc.c file; statistics functions were rewritten and are
simpler.
- New statistics are collected by the system, like a per-cache
analysis (swap and page cache). Statistics is much more complete and
nicer.
- Now there are functions that force the VM to skip writing dirty
buffer, shrinking slab cache, dcache and icache, since we want the
system to put much more pressure on pages from page and swap cache in
order to have these kind of pages compressed.
- Pages are removed from compressed cache in swapin if the process has
write permissions. Since the pte will be set dirty, the page will be
surely compressed again, so why keep it in the compressed cache?
- If we are swapping in and the page is not present in swap cache, we
no longer read a cluster of pages from swap device if the page is in
compressed cache. This conceptual bug forced us to read many pages
from swap device if the page was compressed in our cache, which is
wrong. The same happened when a file entry was faulted in
and we service this fault. Beforehand we were forcing a cluster read
even if the page were present in compressed cache.
Index: Configure.help
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/Documentation/Configure.help,v
retrieving revision 1.5
retrieving revision 1.6
diff -C2 -r1.5 -r1.6
*** Configure.help 26 Feb 2002 20:59:01 -0000 1.5
--- Configure.help 28 Apr 2002 20:51:32 -0000 1.6
***************
*** 377,383 ****
gigabytes of physical RAM.
! Compressed Cache
CONFIG_COMP_CACHE
! Select this if you want to enable Compressed Cache support. If this
option is chosen, your memory will be split into two caches:
uncompressed cache, which holds pages in normal state; and
--- 377,383 ----
gigabytes of physical RAM.
! Compressed cache (EXPERIMENTAL)
CONFIG_COMP_CACHE
! Select this if you want to enable compressed cache support. If this
option is chosen, your memory will be split into two caches:
uncompressed cache, which holds pages in normal state; and
***************
*** 386,399 ****
system performance.
If unsure, say N here.
! Maximum Compressed Cache Size (Memory Pages)
! CONFIG_COMP_CACHE_SIZE
! Here you choose the maximum number of memory pages used by the
! Compressed Cache. If the number is greater than half of memory size,
! it will set to 512, the default value.
! The maximum value will be not necessarily used and can be configured
! on-the-fly by /proc/sys/vm/comp_cache/size entry.
Normal floppy disk support
--- 386,414 ----
system performance.
+ Initial number of pages reserved for compressed cache is set by the
+ kernel parameter "compsize=N", where N is a number of memory pages.
+
+ If unsure, say N here.
+
+ Support for Page Cache compression
+ CONFIG_COMP_PAGE_CACHE
+ Select this option in case you want compressed cache to store also
+ pages from page cache, ie file mapped pages, and to take into
+ account these pages when adapting compressed cache to recent
+ behaviour. If you don't select this option, compressed cache will
+ store only anonymous pages, ie pages not mapped to files.
+
If unsure, say N here.
! Automatic adaptivity for compressed cache size
! CONFIG_COMP_ADAPTIVITY
! Select this option in case you want compressed cache to adapt its
! size to the system behaviour. That way, current code will
! automatically compute the cost and benefit of several compressed
! cache sizes, choosing the best size for whole system performance.
!
! This option is still not functional.
! If unsure, say N here.
Normal floppy disk support
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-04-28 20:51:38
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv2510/mm/comp_cache
Modified Files:
adaptivity.c aux.c free.c main.c proc.c swapin.c swapout.c
Log Message:
This version features a first non-functional version of compressed
cache automatic adaptivity to system behaviour. It also has
many changes aiming to fix the performance drop we have in linux
kernel compilation test (check statistics for 0.23pre1 on our web
site). Our analysis isn't complete and more changes are likely to go
since a huge percentage of CPU is still not being used. Anyway, the
current changes improve compressed cache a lot, mainly compressed
cache support for page cache, and it already works much better in that
scenario.
Some detailed changes:
- Configuration options changes. Now we only make compressed cache
option available if SMP is turned off. Page cache support is an
option, that is disabled by default. There's also an option to enable
adaptivity, which is currently non-functional.
- There's no option in kernel configuration to select initial
compressed cache size any longer. It can be selected only by kernel
parameter. This parameter won't be available when adaptivity option is
enabled (since the system will configure compressed cache
automatically). In this case, initial compressed cache size is 10% of
total memory size.
- Functions cleanup: all algorithms functions and related stuff are
now in proc.c file; statistics functions were rewritten and are
simpler.
- New statistics are collected by the system, like a per-cache
analysis (swap and page cache). Statistics is much more complete and
nicer.
- Now there are functions that force the VM to skip writing dirty
buffer, shrinking slab cache, dcache and icache, since we want the
system to put much more pressure on pages from page and swap cache in
order to have these kind of pages compressed.
- Pages are removed from compressed cache in swapin if the process has
write permissions. Since the pte will be set dirty, the page will be
surely compressed again, so why keep it in the compressed cache?
- If we are swapping in and the page is not present in swap cache, we
no longer read a cluster of pages from swap device if the page is in
compressed cache. This conceptual bug forced us to read many pages
from swap device if the page was compressed in our cache, which is
wrong. The same happened when a file entry was faulted in
and we service this fault. Beforehand we were forcing a cluster read
even if the page were present in compressed cache.
Index: adaptivity.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v
retrieving revision 1.16
retrieving revision 1.17
diff -C2 -r1.16 -r1.17
*** adaptivity.c 26 Mar 2002 12:35:10 -0000 1.16
--- adaptivity.c 28 Apr 2002 20:51:35 -0000 1.17
***************
*** 2,6 ****
* linux/mm/comp_cache/adaptivity.c
*
! * Time-stamp: <2002-03-26 09:21:19 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/adaptivity.c
*
! * Time-stamp: <2002-04-03 12:33:28 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 18,21 ****
--- 18,168 ----
static int fragment_failed_alloc = 0, vswap_failed_alloc = 0;
+ struct preset_comp_cache * preset_comp_cache;
+ int nr_preset_sizes, current_preset_size;
+
+ static double time_comp = 0.3, time_decomp = 0.2, time_disk_read = 5;
+ int latest_uncomp_misses[10], latest_miss;
+
+ #define comp_cache_used_space ((real_num_comp_pages * PAGE_SIZE) - comp_cache_free_space)
+ #define comp_cache_total_space (preset_comp_cache[i].size * PAGE_SIZE)
+
+ /***
+ * adapt_comp_cache(void) - adapt compressed cache to the recent
+ * behaviour, resizing it if we would have better performance with
+ * another size.
+ *
+ * TODO
+ * - make compressed_ratio variable show the actual ratio
+ * - collect faults by lru region
+ * - account the number of swap cache pages in active and inactive lists?
+ */
+ void
+ adapt_comp_cache(void) {
+ static int nr = 0;
+ int i, best_size, nr_uncomp_misses, uncomp_size, delta_disk_reads, compress_ratio = 2;
+
+ if (++nr % 100)
+ return;
+
+ /* decay miss information */
+ i = (latest_miss + 1) % 10;
+ while (i != latest_miss) {
+ latest_uncomp_misses[i] = 0.8 * latest_uncomp_misses[i];
+ i = (i + 1) % 10;
+ }
+ latest_uncomp_misses[latest_miss] = nr_compressed_cache_misses + nr_swap_misses;
+
+ for (nr_uncomp_misses = 0, i = 0; i < 10; i++)
+ nr_uncomp_misses += latest_uncomp_misses[i];
+
+ latest_miss = (latest_miss + 1) % 10;
+
+ if (!nr_uncomp_misses)
+ return;
+
+ printk("nr_uncomp_misses %d\n", nr_uncomp_misses);
+ printk("free space %ld\n", (comp_cache_free_space * 100)/(real_num_comp_pages * PAGE_SIZE));
+
+ /* compute costs and benefits - smaller sizes*/
+ best_size = current_preset_size;
+ for (i = current_preset_size; i >= 0; i--) {
+ double cost, benefit;
+ int comp_size, delta_real_size;
+
+ comp_size = preset_comp_cache[i].size;
+ uncomp_size = num_physpages - comp_size;
+
+ delta_real_size = (comp_cache_total_space/compress_ratio);
+ printk("size %d real size %d used space %ld\n", preset_comp_cache[i].size, delta_real_size, comp_cache_used_space);
+
+ if (comp_cache_used_space < delta_real_size)
+ delta_disk_reads = 0;
+ else {
+ if (comp_cache_used_space > preset_comp_cache[i].size * PAGE_SIZE) {
+ delta_disk_reads = ((float) comp_size)/preset_comp_cache[current_preset_size].size * nr_compressed_cache_misses;
+ //printk("disk reads 1 %d\n", delta_disk_reads);
+ }
+ else {
+ delta_disk_reads = ((comp_cache_used_space - delta_real_size) * nr_compressed_cache_misses)/comp_cache_used_space;
+ //printk("disk reads 2 %d\n", delta_disk_reads);
+ }
+ }
+
+ cost = (nr_uncomp_misses * comp_size)/preset_comp_cache[current_preset_size].size;
+ printk("cost %d\n", (int) cost);
+ cost *= (time_comp + time_decomp);
+ benefit = delta_disk_reads * (time_disk_read);
+ printk("cost %d benefit %d\n", (int) cost, (int) benefit);
+
+ preset_comp_cache[i].profit = cost - benefit;
+
+ if (preset_comp_cache[i].profit < preset_comp_cache[best_size].profit)
+ best_size = i;
+
+ printk("profit %d -> %d (smaller)\n", i, preset_comp_cache[i].profit);
+ }
+
+ if (comp_cache_free_space > 0.30 * real_num_comp_pages * PAGE_SIZE)
+ goto out;
+
+ /* compute costs and benefits - larger sizes*/
+ for (i = current_preset_size + 1; i < nr_preset_sizes; i++) {
+ double cost, benefit;
+ int comp_size, diff_new_real_old_uncomp, incr_comp_size, scale = 0;
+
+ comp_size = preset_comp_cache[i].size;
+ uncomp_size = num_physpages - comp_size;
+
+ /* new real memory size in function of the new compressed cache size */
+ diff_new_real_old_uncomp = uncomp_size + comp_size/compress_ratio;
+ /* minus the current uncompressed cache */
+ diff_new_real_old_uncomp -= (num_physpages - preset_comp_cache[current_preset_size].size);
+
+ /* unlikely */
+ if (diff_new_real_old_uncomp > 0) {
+ printk("1st case\n");
+ scale = 1;
+ }
+
+ /* we can fill up the new comp cache space */
+ incr_comp_size = preset_comp_cache[i].size - preset_comp_cache[current_preset_size].size;
+ if (swapper_space.nrpages/compress_ratio > incr_comp_size) {
+ printk("fill up\n");
+ scale = 1;
+ }
+
+ printk("nr_compressed_cache_misses %d\n", nr_compressed_cache_misses);
+
+ if (scale)
+ delta_disk_reads = (1 - ((float) diff_new_real_old_uncomp/preset_comp_cache[current_preset_size].size)) * nr_compressed_cache_misses;
+ else {
+ delta_disk_reads = nr_compressed_cache_misses;
+ delta_disk_reads += ((((float) swapper_space.nrpages)/compress_ratio - (incr_comp_size + diff_new_real_old_uncomp)) * nr_compressed_cache_misses)/preset_comp_cache[current_preset_size].size;
+ printk("delta_disk_reads %d\n", delta_disk_reads);
+ }
+
+ cost = nr_uncomp_misses * ((float) preset_comp_cache[i].size/preset_comp_cache[current_preset_size].size);
+ cost *= (time_comp + time_decomp);
+ benefit = delta_disk_reads * (time_disk_read);
+ printk("cost %d benefit %d\n", (int) cost, (int) benefit);
+
+ preset_comp_cache[i].profit = cost - benefit;
+
+ printk("profit %d -> %d (bigger)\n", i, preset_comp_cache[i].profit);
+
+ if (preset_comp_cache[i].profit < preset_comp_cache[best_size].profit)
+ best_size = i;
+ }
+
+
+ out:
+ new_num_comp_pages = preset_comp_cache[best_size].size;
+ current_preset_size = best_size;
+ printk("best size %d\n", best_size);
+
+ /* reset stats */
+ nr_compressed_cache_misses = nr_swap_misses = 0;
+ }
+
void
resize_fragment_hash_table(void) {
***************
*** 417,420 ****
--- 564,568 ----
comp_cache_freeable_space -= PAGE_SIZE;
+ comp_cache_free_space -= PAGE_SIZE;
real_num_comp_pages--;
//printk("shrink new %lu real %lu\n", new_num_comp_pages, real_num_comp_pages);
***************
*** 495,498 ****
--- 643,647 ----
comp_cache_freeable_space += PAGE_SIZE;
+ comp_cache_free_space += PAGE_SIZE;
real_num_comp_pages++;
//printk("grow real %lu\n", real_num_comp_pages);
Index: aux.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v
retrieving revision 1.23
retrieving revision 1.24
diff -C2 -r1.23 -r1.24
*** aux.c 21 Mar 2002 19:24:17 -0000 1.23
--- aux.c 28 Apr 2002 20:51:35 -0000 1.24
***************
*** 2,6 ****
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-03-20 16:48:44 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-04-18 13:00:30 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 293,302 ****
}
- inline int
- comp_cache_free_space(void)
- {
- return 2 * (comp_cache_freeable_space >> PAGE_SHIFT);
- }
-
inline void
print_all_fragments (comp_cache_t * comp_page)
--- 293,296 ----
***************
*** 372,424 ****
}
- void
- comp_cache_update_comp_stats(unsigned short alg_idx, stats_page_t * comp_page_stats)
- {
- compression_algorithm_t * algorithm = &compression_algorithms[alg_idx];
- stats_summary_t * stats = &(algorithm->stats);
-
- stats->pgccout++;
-
- /* update compressed size statistics */
- if (!comp_page_stats->comp_size)
- BUG();
-
- if (comp_page_stats->comp_size < stats->comp_size_min)
- stats->comp_size_min = comp_page_stats->comp_size;
-
- if (comp_page_stats->comp_size > stats->comp_size_max)
- stats->comp_size_max = comp_page_stats->comp_size;
-
- stats->comp_size_sum += comp_page_stats->comp_size;
-
- /* update comp cycles statistics */
- if (comp_page_stats->comp_cycles < stats->comp_cycles_min)
- stats->comp_cycles_min = comp_page_stats->comp_cycles;
-
- if (comp_page_stats->comp_cycles > stats->comp_cycles_max)
- stats->comp_cycles_max = comp_page_stats->comp_cycles;
-
- stats->comp_cycles_sum += comp_page_stats->comp_cycles;
-
- if (((float) comp_page_stats->comp_size/PAGE_SIZE) > DISCARD_MARK)
- stats->discarded_pages++;
- }
-
- void
- comp_cache_update_decomp_stats(unsigned short alg_idx, stats_page_t * comp_page_stats)
- {
- compression_algorithm_t * algorithm = &compression_algorithms[alg_idx];
- stats_summary_t * stats = &(algorithm->stats);
-
- /* update decomp cycles statistics */
- if (comp_page_stats->decomp_cycles < stats->decomp_cycles_min)
- stats->decomp_cycles_min = comp_page_stats->decomp_cycles;
-
- if (comp_page_stats->decomp_cycles > stats->decomp_cycles_max)
- stats->decomp_cycles_max = comp_page_stats->decomp_cycles;
-
- stats->decomp_cycles_sum += comp_page_stats->decomp_cycles;
- }
-
comp_cache_fragment_t **
create_fragment_hash(unsigned long * fragment_hash_size, unsigned int * bits, unsigned int * order) {
--- 366,369 ----
***************
*** 457,461 ****
panic("comp_cache_hash_init(): couldn't allocate fragment hash table\n");
! printk("Compressed Cache: fragment hash table - %lu = %luB\n", fragment_hash_size, (PAGE_SIZE << fragment_hash_order));
/* inits comp cache free space hash table */
--- 402,406 ----
panic("comp_cache_hash_init(): couldn't allocate fragment hash table\n");
! printk("Compressed Cache: fragment hash table - %lu entries = %luB\n", fragment_hash_size, (PAGE_SIZE << fragment_hash_order));
/* inits comp cache free space hash table */
***************
*** 465,469 ****
free_space_hash = vmalloc(free_space_hash_size * sizeof(comp_cache_t *));
! printk("Compressed Cache: free space hash table - %u = %uB\n", free_space_hash_size, free_space_hash_size * sizeof(comp_cache_t *));
if (!free_space_hash)
--- 410,414 ----
free_space_hash = vmalloc(free_space_hash_size * sizeof(comp_cache_t *));
! printk("Compressed Cache: free space hash table - %u entries = %uB\n", free_space_hash_size, free_space_hash_size * sizeof(comp_cache_t *));
if (!free_space_hash)
Index: free.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v
retrieving revision 1.26
retrieving revision 1.27
diff -C2 -r1.26 -r1.27
*** free.c 12 Mar 2002 17:54:20 -0000 1.26
--- free.c 28 Apr 2002 20:51:35 -0000 1.27
***************
*** 2,6 ****
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2002-03-12 12:48:14 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2002-04-02 09:31:32 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 71,74 ****
--- 71,75 ----
list_del_init(&fragment->mapping_list);
fragment->mapping->nrpages--;
+ comp_cache_free_space += fragment->compressed_size;
}
Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.33
retrieving revision 1.34
diff -C2 -r1.33 -r1.34
*** main.c 13 Mar 2002 20:44:33 -0000 1.33
--- main.c 28 Apr 2002 20:51:35 -0000 1.34
***************
*** 2,6 ****
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-03-13 09:05:06 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-04-28 16:51:46 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 16,39 ****
#include <linux/pagemap.h>
- #include <linux/WKcommon.h>
- #include <linux/WKdm.h>
- #include <linux/WK4x4.h>
-
#include <asm/page.h>
#include <net/checksum.h>
- /* data used for compression */
- comp_data_t comp_data;
-
- WK_word compresseddata[1200];
- WK_word decompresseddata[1200];
- WK_word compressedtempTagsArray[300];
- WK_word compressedtempQPosArray[300];
- WK_word compressedtempLowBitsArray[1200];
-
- char compressedhashLookupTable_WKdm [] = HASH_LOOKUP_TABLE_CONTENTS_WKDM;
- unsigned int compressedhashLookupTable_WK4x4 [] = HASH_LOOKUP_TABLE_CONTENTS_WK4x4;
- DictionaryElement compresseddictionary[DICTIONARY_SIZE];
-
/* compressed cache */
unsigned long real_num_comp_pages = 0;
--- 16,22 ----
***************
*** 44,89 ****
unsigned long min_num_comp_pages = 0;
kmem_cache_t * comp_cachep;
kmem_cache_t * fragment_cachep;
! /* compression algorithms */
! compression_algorithm_t compression_algorithms[NUM_ALGORITHMS];
! int current_algorithm;
extern unsigned long num_physpages;
extern comp_cache_t * get_comp_cache_page(struct page *, unsigned short, comp_cache_fragment_t **, int, unsigned int);
- extern void comp_cache_update_comp_stats(unsigned short, stats_page_t *);
! static inline void
! set_fragment_algorithm(comp_cache_fragment_t * fragment, unsigned short algorithm)
{
! switch(algorithm) {
! case WKDM_IDX:
! CompFragmentSetWKdm(fragment);
! break;
! case WK4X4_IDX:
! CompFragmentSetWK4x4(fragment);
! break;
! default:
! BUG();
! }
}
! static inline int
! compress(void * from, void * to, unsigned short * algorithm)
{
! stats_page_t comp_page_stats;
!
! START_ZEN_TIME(comp_page_stats.myTimer);
! comp_page_stats.comp_size = compression_algorithms[current_algorithm].comp(from, to, PAGE_SIZE/4, (void *)(&comp_data));
! STOP_ZEN_TIME(comp_page_stats.myTimer, comp_page_stats.comp_cycles);
!
! /* update some statistics */
! comp_cache_update_comp_stats(current_algorithm, &comp_page_stats);
!
! *algorithm = current_algorithm;
!
! return ((comp_page_stats.comp_size <= PAGE_SIZE)?comp_page_stats.comp_size:PAGE_SIZE);
}
--- 27,75 ----
unsigned long min_num_comp_pages = 0;
+ unsigned long comp_cache_free_space;
+
kmem_cache_t * comp_cachep;
kmem_cache_t * fragment_cachep;
! int nr_swap_misses;
! int nr_compressed_cache_misses;
!
! static unsigned int skip_buffer = 0, skip_slab = 0, skip_dicache = 0;
extern unsigned long num_physpages;
extern comp_cache_t * get_comp_cache_page(struct page *, unsigned short, comp_cache_fragment_t **, int, unsigned int);
! /***
! * Skips a number of dirty buffer writes when we have much memory
! * pressure, since we want the system to put much more pressure on
! * pages from page and swap cache in order to have these kind of pages
! * compressed.
! *
! * TODO: make the number of skips dependent on relative compressed
! * cache size.
! */
! inline int
! comp_cache_skip_buffer_freeing(void)
{
! return (skip_buffer = ++skip_buffer % 500);
}
! /***
! * Same as above, but skips shrinking slab cache (kmem_cache_reap()).
! */
! inline int
! comp_cache_skip_slab_shrunk(void)
{
! return (skip_slab = ++skip_slab % 500);
! }
!
! /***
! * Same as above, but skips shrinking dcache and icache.
! */
! inline int
! comp_cache_skip_dicache_shrunk(void)
! {
! return (skip_dicache = ++skip_dicache % 500);
}
***************
*** 92,97 ****
{
int ret;
!
! if (likely(!shmem_page(page)))
ret = compress_page(page, 1, gfp_mask);
else
--- 78,87 ----
{
int ret;
!
! #ifdef CONFIG_COMP_PAGE_CACHE
! if (!shmem_page(page))
! #else
! if (PageSwapCache(page))
! #endif
ret = compress_page(page, 1, gfp_mask);
else
***************
*** 106,109 ****
--- 96,104 ----
return 0;
+ #ifndef CONFIG_COMP_PAGE_CACHE
+ if (!PageSwapCache(page))
+ return 0;
+ #endif
+
page_cache_get(page);
spin_unlock(&pagecache_lock);
***************
*** 133,137 ****
if (!PageLocked(page))
BUG();
-
if (PageTestandClearCompCache(page)) {
if (!dirty)
--- 128,131 ----
***************
*** 139,151 ****
invalidate_comp_cache(page->mapping, page->index);
}
! comp_size = compress(page_address(current_compressed_page = page), buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm);
comp_page = get_comp_cache_page(page, comp_size, &fragment, dirty, gfp_mask);
/* if comp_page == NULL, get_comp_cache_page() gave up
* reserving a swap entry for this page, so we should return
! * right now, because it won't be compressed. Since it had its
! * dirty bit cleared, we have to set it back, because it is
! * still dirty and must cleaned if needed */
if (!comp_page)
return 0;
--- 133,149 ----
invalidate_comp_cache(page->mapping, page->index);
}
+
+ #ifdef CONFIG_COMP_ADAPTIVITY
+ adapt_comp_cache();
+ #endif
! comp_size = compress(current_compressed_page = page, buffer_compressed = (unsigned long *) &buffer_compressed1, &algorithm);
comp_page = get_comp_cache_page(page, comp_size, &fragment, dirty, gfp_mask);
/* if comp_page == NULL, get_comp_cache_page() gave up
* reserving a swap entry for this page, so we should return
! * right now, because it won't be compressed. Its dirty bit
! * has been set back in get_comp_cache_page() since it's still
! * dirty and needs to be cleaned. */
if (!comp_page)
return 0;
***************
*** 158,162 ****
if (compressed(fragment)) {
if (current_compressed_page != page)
! compress(page_address(page), buffer_compressed = (unsigned long *) &buffer_compressed2, &algorithm);
memcpy(page_address(comp_page->page) + fragment->offset, buffer_compressed , fragment->compressed_size);
} else
--- 156,160 ----
if (compressed(fragment)) {
if (current_compressed_page != page)
! compress(page, buffer_compressed = (unsigned long *) &buffer_compressed2, &algorithm);
memcpy(page_address(comp_page->page) + fragment->offset, buffer_compressed , fragment->compressed_size);
} else
***************
*** 175,194 ****
extern void __init comp_cache_vswap_init(void);
- #define RESET_STATS \
- do { \
- compression_algorithms[current_algorithm].stats.comp_size_sum = 0; \
- compression_algorithms[current_algorithm].stats.comp_size_max = 0; \
- compression_algorithms[current_algorithm].stats.comp_size_min = INF; \
- compression_algorithms[current_algorithm].stats.comp_cycles_sum = 0; \
- compression_algorithms[current_algorithm].stats.comp_cycles_max = 0; \
- compression_algorithms[current_algorithm].stats.comp_cycles_min = INF; \
- compression_algorithms[current_algorithm].stats.decomp_cycles_sum = 0; \
- compression_algorithms[current_algorithm].stats.decomp_cycles_max = 0; \
- compression_algorithms[current_algorithm].stats.decomp_cycles_min = INF; \
- compression_algorithms[current_algorithm].stats.pgccin = 0; \
- compression_algorithms[current_algorithm].stats.pgccout = 0; \
- compression_algorithms[current_algorithm].stats.discarded_pages = 0; } \
- while (0)
-
LIST_HEAD(lru_queue);
--- 173,176 ----
***************
*** 209,234 ****
int i;
! max_num_comp_pages = num_physpages/2;
!
! if (!init_num_comp_pages)
! init_num_comp_pages = CONFIG_COMP_CACHE_SIZE;
if (!init_num_comp_pages || init_num_comp_pages > max_num_comp_pages)
! init_num_comp_pages = 512;
!
! new_num_comp_pages = real_num_comp_pages = init_num_comp_pages;
! min_num_comp_pages = 0;
!
! printk("Compressed Cache: starting %s - %lu pages = %luKiB\n", COMP_CACHE_VERSION, init_num_comp_pages, (init_num_comp_pages * PAGE_SIZE)/1024);
!
! /* initialize our data for the `test' compressed_page */
! comp_data.compressed_data = compresseddata;
! comp_data.decompressed_data = decompresseddata;
! comp_data.hashLookupTable_WKdm = compressedhashLookupTable_WKdm;
! comp_data.hashLookupTable_WK4x4 = compressedhashLookupTable_WK4x4;
! comp_data.dictionary = compresseddictionary;
! comp_data.tempTagsArray = compressedtempTagsArray;
! comp_data.tempQPosArray = compressedtempQPosArray;
! comp_data.tempLowBitsArray = compressedtempLowBitsArray;
/* create slab caches */
--- 191,227 ----
int i;
! max_num_comp_pages = num_physpages * 0.5;
+ #ifndef CONFIG_COMP_ADAPTIVITY
if (!init_num_comp_pages || init_num_comp_pages > max_num_comp_pages)
! #endif
! init_num_comp_pages = num_physpages * 0.10;
! new_num_comp_pages = min_num_comp_pages = real_num_comp_pages = init_num_comp_pages;
!
! printk("Compressed Cache: starting %s version\n", COMP_CACHE_VERSION);
!
! /* adaptivity */
! nr_swap_misses = 0;
! nr_compressed_cache_misses = 0;
!
! nr_preset_sizes = 4;
! preset_comp_cache = (struct preset_comp_cache *) kmalloc(nr_preset_sizes * sizeof(*preset_comp_cache), GFP_ATOMIC);
!
! #ifdef CONFIG_COMP_ADAPTIVITY
! printk("Compressed Cache: adaptivity\n");
! preset_comp_cache[0].size = num_physpages * 0.10;
! preset_comp_cache[1].size = num_physpages * 0.23;
! preset_comp_cache[2].size = num_physpages * 0.37;
! preset_comp_cache[3].size = num_physpages * 0.50;
!
! for (i = 0; i < nr_preset_sizes; i++)
! printk("Compressed Cache: preset size %d: %u memory pages\n", i, preset_comp_cache[i].size);
!
! for (i = 0; i < 10; i++)
! latest_uncomp_misses[i] = 0;
! latest_miss = 0;
! #else
! printk("Compressed Cache: initial size - %lu pages = %luKiB\n", init_num_comp_pages, (init_num_comp_pages * PAGE_SIZE)/1024);
! #endif
/* create slab caches */
***************
*** 249,277 ****
init_comp_page(&comp_page, page);
}
/* initialize our algorithms statistics array */
! for (current_algorithm = 0; current_algorithm < NUM_ALGORITHMS; current_algorithm++)
! RESET_STATS;
!
! strcpy(compression_algorithms[WKDM_IDX].name, "WKdm");
! compression_algorithms[WKDM_IDX].comp = WKdm_compress;
! compression_algorithms[WKDM_IDX].decomp = WKdm_decompress;
!
! strcpy(compression_algorithms[WK4X4_IDX].name, "WK4x4");
! compression_algorithms[WK4X4_IDX].comp = WK4x4_compress;
! compression_algorithms[WK4X4_IDX].decomp = WK4x4_decompress;
!
! current_algorithm = WKDM_IDX;
}
static int __init comp_cache_size(char *str)
{
! char * endp;
!
! init_num_comp_pages = simple_strtoul(str, &endp, 0);
! return 1;
}
__setup("compsize=", comp_cache_size);
/*
--- 242,262 ----
init_comp_page(&comp_page, page);
}
+ comp_cache_free_space = real_num_comp_pages * PAGE_SIZE;
/* initialize our algorithms statistics array */
! comp_cache_algorithms_init();
}
+ #ifndef CONFIG_COMP_ADAPTIVITY
static int __init comp_cache_size(char *str)
{
! char * endp;
!
! init_num_comp_pages = simple_strtoul(str, &endp, 0);
! return 1;
}
__setup("compsize=", comp_cache_size);
+ #endif
/*
Index: proc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/proc.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -C2 -r1.6 -r1.7
*** proc.c 28 Dec 2001 21:45:24 -0000 1.6
--- proc.c 28 Apr 2002 20:51:35 -0000 1.7
***************
*** 2,6 ****
* linux/mm/comp_cache/proc.c
*
! * Time-stamp: <2001-12-27 15:35:38 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/proc.c
*
! * Time-stamp: <2002-04-18 15:32:34 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 13,18 ****
#include <linux/sysctl.h>
extern unsigned long new_num_comp_pages, max_num_comp_pages, min_num_comp_pages;
! int algorithm_min = WKDM_IDX<WK4X4_IDX?WKDM_IDX:WK4X4_IDX, algorithm_max = WKDM_IDX>WK4X4_IDX?WKDM_IDX:WK4X4_IDX;
enum
--- 13,43 ----
#include <linux/sysctl.h>
+ #include <linux/WKcommon.h>
+ #include <linux/WKdm.h>
+ #include <linux/WK4x4.h>
+
+ #define NUM_ALGORITHMS 2
+ #define WKDM_IDX 0
+ #define WK4X4_IDX 1
+
extern unsigned long new_num_comp_pages, max_num_comp_pages, min_num_comp_pages;
!
! static compression_algorithm_t compression_algorithms[NUM_ALGORITHMS];
! static int algorithm_min = WKDM_IDX;
! static int algorithm_max = WK4X4_IDX;
! static int current_algorithm;
!
! /* data used for compression */
! static comp_data_t comp_data;
!
! static WK_word compresseddata[1200];
! static WK_word decompresseddata[1200];
! static WK_word compressedtempTagsArray[300];
! static WK_word compressedtempQPosArray[300];
! static WK_word compressedtempLowBitsArray[1200];
!
! static char compressedhashLookupTable_WKdm [] = HASH_LOOKUP_TABLE_CONTENTS_WKDM;
! static unsigned int compressedhashLookupTable_WK4x4 [] = HASH_LOOKUP_TABLE_CONTENTS_WK4x4;
! static DictionaryElement compresseddictionary[DICTIONARY_SIZE];
enum
***************
*** 32,35 ****
--- 57,217 ----
};
+ static void
+ comp_cache_update_comp_stats(stats_page_t * comp_page_stats, struct page * page)
+ {
+ compression_algorithm_t * algorithm = &compression_algorithms[current_algorithm];
+ stats_summary_t * stats = &(algorithm->stats);
+
+ /* update compressed size statistics */
+ if (!comp_page_stats->comp_size)
+ BUG();
+
+ if (comp_page_stats->comp_size < stats->comp_size_min)
+ stats->comp_size_min = comp_page_stats->comp_size;
+
+ if (comp_page_stats->comp_size > stats->comp_size_max)
+ stats->comp_size_max = comp_page_stats->comp_size;
+
+ stats->comp_size_sum += comp_page_stats->comp_size;
+
+ /* update comp cycles statistics */
+ if (comp_page_stats->comp_cycles < stats->comp_cycles_min)
+ stats->comp_cycles_min = comp_page_stats->comp_cycles;
+
+ if (comp_page_stats->comp_cycles > stats->comp_cycles_max)
+ stats->comp_cycles_max = comp_page_stats->comp_cycles;
+
+ stats->comp_cycles_sum += comp_page_stats->comp_cycles;
+
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ if (!PageSwapCache(page))
+ compression_algorithms[current_algorithm].stats.comp_page++;
+ else
+ #endif
+ compression_algorithms[current_algorithm].stats.comp_swap++;
+ }
+
+ static void
+ comp_cache_update_decomp_stats(unsigned short alg_idx, stats_page_t * comp_page_stats, comp_cache_fragment_t * fragment)
+ {
+ compression_algorithm_t * algorithm = &compression_algorithms[alg_idx];
+ stats_summary_t * stats = &(algorithm->stats);
+
+ /* update decomp cycles statistics */
+ if (comp_page_stats->decomp_cycles < stats->decomp_cycles_min)
+ stats->decomp_cycles_min = comp_page_stats->decomp_cycles;
+
+ if (comp_page_stats->decomp_cycles > stats->decomp_cycles_max)
+ stats->decomp_cycles_max = comp_page_stats->decomp_cycles;
+
+ stats->decomp_cycles_sum += comp_page_stats->decomp_cycles;
+
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ if (!PageSwapCache(fragment))
+ compression_algorithms[current_algorithm].stats.decomp_page++;
+ else
+ #endif
+ compression_algorithms[current_algorithm].stats.decomp_swap++;
+ }
+
+ void
+ comp_cache_update_writeout_stats(comp_cache_fragment_t * fragment)
+ {
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ if (!PageSwapCache(fragment))
+ compression_algorithms[current_algorithm].stats.page_out++;
+ else
+ #endif
+ compression_algorithms[current_algorithm].stats.swap_out++;
+ }
+
+ void
+ comp_cache_update_faultin_stats(comp_cache_fragment_t * fragment)
+ {
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ if (!PageSwapCache(fragment))
+ compression_algorithms[current_algorithm].stats.faultin_page++;
+ else
+ #endif
+ compression_algorithms[current_algorithm].stats.faultin_swap++;
+ }
+
+ void
+ set_fragment_algorithm(comp_cache_fragment_t * fragment, unsigned short algorithm)
+ {
+ if (algorithm == WKDM_IDX)
+ CompFragmentSetWKdm(fragment);
+ else
+ CompFragmentSetWK4x4(fragment);
+ }
+
+ int
+ compress(struct page * page, void * to, unsigned short * algorithm)
+ {
+ stats_page_t comp_page_stats;
+ void * from = page_address(page);
+
+ START_ZEN_TIME(comp_page_stats.myTimer);
+ comp_page_stats.comp_size = compression_algorithms[current_algorithm].comp(from, to, PAGE_SIZE/4, (void *)(&comp_data));
+ STOP_ZEN_TIME(comp_page_stats.myTimer, comp_page_stats.comp_cycles);
+ comp_cache_update_comp_stats(&comp_page_stats, page);
+
+ *algorithm = current_algorithm;
+ if (comp_page_stats.comp_size > PAGE_SIZE)
+ comp_page_stats.comp_size = PAGE_SIZE;
+
+ return (comp_page_stats.comp_size);
+ }
+
+ void
+ decompress(comp_cache_fragment_t * fragment, struct page * page)
+ {
+ stats_page_t comp_page_stats;
+ void * from = page_address(fragment->comp_page->page) + fragment->offset;
+ void * to = page_address(page);
+ unsigned int algorithm;
+
+ if (CompFragmentWKdm(fragment))
+ algorithm = WKDM_IDX;
+ else
+ algorithm = WK4X4_IDX;
+
+ START_ZEN_TIME(comp_page_stats.myTimer);
+ compression_algorithms[algorithm].decomp(from, to, PAGE_SIZE/4, (void *)(&comp_data));
+ STOP_ZEN_TIME(comp_page_stats.myTimer, comp_page_stats.decomp_cycles);
+ comp_cache_update_decomp_stats(algorithm, &comp_page_stats, fragment);
+ }
+
+
+ void __init
+ comp_cache_algorithms_init(void)
+ {
+ /* initialize our data for the `test' compressed_page */
+ comp_data.compressed_data = compresseddata;
+ comp_data.decompressed_data = decompresseddata;
+ comp_data.hashLookupTable_WKdm = compressedhashLookupTable_WKdm;
+ comp_data.hashLookupTable_WK4x4 = compressedhashLookupTable_WK4x4;
+ comp_data.dictionary = compresseddictionary;
+ comp_data.tempTagsArray = compressedtempTagsArray;
+ comp_data.tempQPosArray = compressedtempQPosArray;
+ comp_data.tempLowBitsArray = compressedtempLowBitsArray;
+
+ for (current_algorithm = 0; current_algorithm < NUM_ALGORITHMS; current_algorithm++) {
+ memset((void *) &compression_algorithms[current_algorithm], 0, sizeof(stats_summary_t));
+ compression_algorithms[current_algorithm].stats.comp_size_min = INF;
+ compression_algorithms[current_algorithm].stats.comp_cycles_min = INF;
+ compression_algorithms[current_algorithm].stats.decomp_cycles_min = INF;
+ }
+
+ strcpy(compression_algorithms[WKDM_IDX].name, "WKdm");
+ compression_algorithms[WKDM_IDX].comp = WKdm_compress;
+ compression_algorithms[WKDM_IDX].decomp = WKdm_decompress;
+
+ strcpy(compression_algorithms[WK4X4_IDX].name, "WK4x4");
+ compression_algorithms[WK4X4_IDX].comp = WK4x4_compress;
+ compression_algorithms[WK4X4_IDX].decomp = WK4x4_decompress;
+
+ current_algorithm = WKDM_IDX;
+ }
static int proc_calc_metrics(char *page, char **start, off_t off,
***************
*** 44,109 ****
}
void
print_comp_cache_stats(unsigned short alg_idx, char * page, int * length)
{
! unsigned int compression_ratio = 0, discard_ratio = 0;
! unsigned long long mean_size = 0, mean_comp_cycles = 0, mean_decomp_cycles = 0;
compression_algorithm_t * algorithm = &compression_algorithms[alg_idx];
stats_summary_t * stats = &algorithm->stats;
! if (!stats->pgccout) {
! *length += sprintf(page + *length, "compressed cache statistics\n");
! *length += sprintf(page + *length, "no pages have been compressed with %s%s\n\n", algorithm->name, current_msg);
! return;
! }
!
! mean_size = big_division(stats->comp_size_sum, stats->pgccout);
! mean_comp_cycles = big_division(stats->comp_cycles_sum, stats->pgccout);
! mean_decomp_cycles = big_division(stats->decomp_cycles_sum, stats->pgccout);
! compression_ratio = ((big_division(stats->comp_size_sum, stats->pgccout)*100)/PAGE_SIZE);
! discard_ratio = (int) ((stats->discarded_pages * 100)/stats->pgccout);
!
*length += sprintf(page + *length, "compressed cache - statistics\n");
! *length += sprintf(page + *length, "-- algorithm --\n%s%s\n", algorithm->name, current_msg);
! *length += sprintf(page + *length, "-- compressed pages --\n%d\n", stats->pgccout);
! *length += sprintf(page + *length, "-- compressed pages faulted in --\n%d\n", stats->pgccin);
- *length += sprintf(page + *length, "-- size --\n");
- *length += sprintf(page + *length, "min | max | mean\n");
*length += sprintf(page + *length,
! " %8d | "
! " %8d | "
! " %9Lu\n",
! (stats->comp_size_min == INF?0:stats->comp_size_min),
! stats->comp_size_max,
! mean_size);
!
! *length += sprintf(page + *length, "-- compression cycles --\n");
! *length += sprintf(page + *length, "min | max | mean\n");
! *length += sprintf(page + *length,
! "%10lu | "
! "%10lu | "
! "%11Lu\n",
! (stats->comp_cycles_min == INF?0:stats->comp_cycles_min),
! stats->comp_cycles_max,
! mean_comp_cycles);
! *length += sprintf(page + *length, "-- decompression cycles --\n");
! *length += sprintf(page + *length, "min | max | mean\n");
! *length += sprintf(page + *length,
! "%10lu | "
! "%10lu | "
! "%11Lu\n\n",
! (stats->decomp_cycles_min == INF?0:stats->decomp_cycles_min),
! stats->decomp_cycles_max,
! mean_decomp_cycles);
*length += sprintf(page + *length,
! "compression ratio: %8d%%\n"
! "discarded pages: %8d\n"
! "discarded ratio: %8d%%\n\n",
! compression_ratio,
! stats->discarded_pages,
! discard_ratio);
}
--- 226,307 ----
}
+ #define current_msg ((algorithm == &compression_algorithms[current_algorithm])?"*":"")
+
void
print_comp_cache_stats(unsigned short alg_idx, char * page, int * length)
{
! unsigned int compression_ratio, discard_ratio;
! unsigned int mean_size, mean_comp_cycles, mean_decomp_cycles;
! unsigned long total_comp_pages, total_wout_pages, total_decomp_pages, total_faultin_pages;
!
compression_algorithm_t * algorithm = &compression_algorithms[alg_idx];
stats_summary_t * stats = &algorithm->stats;
! total_comp_pages = stats->comp_swap + stats->comp_page;
! total_decomp_pages = stats->decomp_swap + stats->decomp_page;
! total_wout_pages = stats->swap_out + stats->page_out;
! total_faultin_pages = stats->faultin_swap + stats->faultin_page;
!
*length += sprintf(page + *length, "compressed cache - statistics\n");
! *length += sprintf(page + *length, "algorithm %s%s\n", algorithm->name, current_msg);
*length += sprintf(page + *length,
! "Compressed Pages: %8lu\n"
! " Swap Cache: %8lu\n"
! " Page Cache: %8lu\n"
! "Decompressed Pages: %8lu\n"
! " Swap Cache: %8lu\n"
! " Page Cache: %8lu\n"
! "Written Out: %8lu\n"
! " Swap Cache: %8lu\n"
! " Page Cache: %8lu\n"
! "Faulted In: %8lu\n"
! " Swap Cache: %8lu\n"
! " Page Cache: %8lu\n",
! total_comp_pages,
! stats->comp_swap,
! stats->comp_page,
! total_decomp_pages,
! stats->decomp_swap,
! stats->decomp_page,
! total_wout_pages,
! stats->swap_out,
! stats->page_out,
! total_faultin_pages,
! stats->faultin_swap,
! stats->faultin_page);
! if (!total_comp_pages)
! return;
+ mean_size = big_division(stats->comp_size_sum, total_comp_pages);
+ mean_comp_cycles = big_division(stats->comp_cycles_sum, total_comp_pages);
+ mean_decomp_cycles = big_division(stats->decomp_cycles_sum, total_comp_pages);
+ compression_ratio = ((big_division(stats->comp_size_sum, total_comp_pages)*100)/PAGE_SIZE);
+ discard_ratio = (int) ((stats->discarded_pages * 100)/total_comp_pages);
+
*length += sprintf(page + *length,
! "Compression\n"
! " MinSize: %8d\n"
! " MaxSize: %8u\n"
! " AvgSize: %8u\n"
! " Ratio: %8d%%\n"
! " MinCycles: %8lu\n"
! " MaxCycles: %8lu\n"
! " AvgCycles: %8u\n"
! "Decompression\n"
! " MinCycles: %8lu\n"
! " MaxCycles: %8lu\n"
! " AvgCycles: %8u\n",
! stats->comp_size_min,
! stats->comp_size_max,
! mean_size,
! compression_ratio,
! stats->comp_cycles_min,
! stats->comp_cycles_max,
! mean_comp_cycles,
! stats->decomp_cycles_min,
! stats->decomp_cycles_max,
! mean_decomp_cycles);
}
Index: swapin.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v
retrieving revision 1.32
retrieving revision 1.33
diff -C2 -r1.32 -r1.33
*** swapin.c 21 Mar 2002 19:24:17 -0000 1.32
--- swapin.c 28 Apr 2002 20:51:35 -0000 1.33
***************
*** 2,6 ****
* linux/mm/comp_cache/swapin.c
*
! * Time-stamp: <2002-03-21 08:37:01 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/swapin.c
*
! * Time-stamp: <2002-04-28 17:19:57 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 61,96 ****
}
- extern void comp_cache_update_decomp_stats(unsigned short, stats_page_t *);
-
- static void
- decompress(comp_cache_fragment_t * fragment, struct page * page)
- {
- stats_page_t comp_page_stats;
- unsigned int algorithm = -1;
- void * from, * to;
-
- if (CompFragmentWKdm(fragment)) {
- algorithm = WKDM_IDX;
- goto actually_decomp;
- }
- if (CompFragmentWK4x4(fragment)) {
- algorithm = WK4X4_IDX;
- goto actually_decomp;
- }
-
- BUG();
-
- actually_decomp:
- from = page_address(fragment->comp_page->page) + fragment->offset;
- to = page_address(page);
-
- START_ZEN_TIME(comp_page_stats.myTimer);
- compression_algorithms[algorithm].decomp(from, to, PAGE_SIZE/4, (void *)(&comp_data));
- STOP_ZEN_TIME(comp_page_stats.myTimer, comp_page_stats.decomp_cycles);
-
- /* update some statistics */
- comp_cache_update_decomp_stats(algorithm, &comp_page_stats);
- }
-
void
decompress_fragment(comp_cache_fragment_t * fragment, struct page * page)
--- 61,64 ----
***************
*** 113,118 ****
memcpy(page_address(page), page_address(comp_page->page), PAGE_SIZE);
SetPageUptodate(page);
- PageSetCompCache(page);
}
--- 81,86 ----
memcpy(page_address(page), page_address(comp_page->page), PAGE_SIZE);
+ PageSetCompCache(page);
SetPageUptodate(page);
}
***************
*** 129,132 ****
--- 97,104 ----
if (err)
goto out;
+
+ #ifdef CONFIG_COMP_ADAPTIVITY
+ adapt_comp_cache();
+ #endif
if (!PageLocked(page))
***************
*** 134,141 ****
if (TryLockPage(fragment->comp_page->page))
BUG();
!
decompress_fragment(fragment, page);
/* update fault in stats */
! compression_algorithms[current_algorithm].stats.pgccin++;
UnlockPage(fragment->comp_page->page);
--- 106,118 ----
if (TryLockPage(fragment->comp_page->page))
BUG();
!
! /* move the fragment to the back of the lru list */
! remove_fragment_from_lru_queue(fragment);
! add_fragment_to_lru_queue(fragment);
!
decompress_fragment(fragment, page);
+
/* update fault in stats */
! comp_cache_update_faultin_stats(fragment);
UnlockPage(fragment->comp_page->page);
***************
*** 213,220 ****
if (!CompFragmentTestandClearDirty(fragment))
BUG();
!
list_del(&fragment->mapping_list);
! list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages);
!
if (add_to_page_cache_unique(page, mapping, fragment->index, hash)) {
if (!find_and_dirty_page(mapping, fragment->index, hash))
--- 190,197 ----
if (!CompFragmentTestandClearDirty(fragment))
BUG();
!
list_del(&fragment->mapping_list);
! list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages);
!
if (add_to_page_cache_unique(page, mapping, fragment->index, hash)) {
if (!find_and_dirty_page(mapping, fragment->index, hash))
***************
*** 227,230 ****
--- 204,208 ----
decompress_fragment(fragment, page);
+ comp_cache_free_locked(fragment);
__set_page_dirty(page);
Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.37
retrieving revision 1.38
diff -C2 -r1.37 -r1.38
*** swapout.c 21 Mar 2002 19:24:17 -0000 1.37
--- swapout.c 28 Apr 2002 20:51:35 -0000 1.38
***************
*** 2,6 ****
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-03-20 15:23:34 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-04-28 17:21:40 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 179,182 ****
--- 179,187 ----
while (!list_empty(&lru_queue) && maxscan--) {
+ if (unlikely(current->need_resched)) {
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ }
+
fragment = list_entry(fragment_lh = lru_queue.prev, comp_cache_fragment_t, lru_queue);
***************
*** 239,245 ****
if (!writepage)
BUG();
-
- writepage(swp_buffer->page);
out:
if (swap_cache_page)
--- 244,251 ----
if (!writepage)
BUG();
+ comp_cache_update_writeout_stats(fragment);
+ writepage(swp_buffer->page);
+
out:
if (swap_cache_page)
***************
*** 410,413 ****
--- 416,421 ----
last_page = (last_page++)%NUM_MEAN_PAGES;
+ comp_cache_free_space -= compressed_size;
+
add_fragment_vswap(fragment);
***************
*** 497,500 ****
--- 505,509 ----
list_add(&(swp_buffer->list), &swp_free_buffer_head);
+ SetPageDirty(buffer_page);
INIT_LIST_HEAD(&buffer_page->list);
}
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-04-28 20:51:37
|
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv2510/mm
Modified Files:
filemap.c memory.c mmap.c swap_state.c vmscan.c
Log Message:
This version features a first non-functional version of compressed
cache automatic adaptivity to system behaviour. It also has
many changes aiming to fix the performance drop we have in linux
kernel compilation test (check statistics for 0.23pre1 on our web
site). Our analysis isn't complete and more changes are likely to go
since a huge percentage of CPU is still not being used. Anyway, the
current changes improve compressed cache a lot, mainly compressed
cache support for page cache, and it already works much better in that
scenario.
Some detailed changes:
- Configuration options changes. Now we only make compressed cache
option available if SMP is turned off. Page cache support is an
option, that is disabled by default. There's also an option to enable
adaptivity, which is currently non-functional.
- There's no option in kernel configuration to select initial
compressed cache size any longer. It can be selected only by kernel
parameter. This parameter won't be available when adaptivity option is
enabled (since the system will configure compressed cache
automatically). In this case, initial compressed cache size is 10% of
total memory size.
- Functions cleanup: all algorithms functions and related stuff are
now in proc.c file; statistics functions were rewritten and are
simpler.
- New statistics are collected by the system, like a per-cache
analysis (swap and page cache). Statistics are much more complete and
nicer.
- Now there are functions that force the VM to skip writing dirty
buffer, shrinking slab cache, dcache and icache, since we want the
system to put much more pressure on pages from page and swap cache in
order to have these kinds of pages compressed.
- Pages are removed from compressed cache in swapin if the process has
write permissions. Since the pte will be set dirty, the page will be
surely compressed again, so why keep it in the compressed cache?
- If we are swapping in and the page is not present in swap cache, we
no longer read a cluster of pages from swap device if the page is in
compressed cache. This conceptual bug forced us to read many pages
from swap device if the page was compressed in our cache, which is
wrong. The same thing happened when a file entry was faulted in and we
serviced this fault: previously we were forcing a cluster read
even if the page was present in the compressed cache.
Index: filemap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v
retrieving revision 1.23
retrieving revision 1.24
diff -C2 -r1.23 -r1.24
*** filemap.c 28 Mar 2002 13:13:03 -0000 1.23
--- filemap.c 28 Apr 2002 20:51:34 -0000 1.24
***************
*** 162,167 ****
--- 162,169 ----
if (mapping->host)
mark_inode_dirty_pages(mapping->host);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (PageTestandClearCompCache(page))
invalidate_comp_cache(mapping, page->index);
+ #endif
}
}
***************
*** 181,185 ****
--- 183,189 ----
struct page * page;
+ #ifdef CONFIG_COMP_PAGE_CACHE
invalidate_comp_pages(inode->i_mapping);
+ #endif
head = &inode->i_mapping->clean_pages;
***************
*** 340,344 ****
--- 344,350 ----
spin_unlock(&pagecache_lock);
+ #ifdef CONFIG_COMP_PAGE_CACHE
truncate_comp_pages(mapping, start, partial);
+ #endif
}
***************
*** 434,438 ****
int unlocked;
! try_again:
spin_lock(&pagecache_lock);
do {
--- 440,446 ----
int unlocked;
! goto try_again;
!
! try_again:
spin_lock(&pagecache_lock);
do {
***************
*** 443,446 ****
--- 451,455 ----
spin_unlock(&pagecache_lock);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_dirty_comp_pages(mapping)) {
lookup_all_comp_pages(mapping);
***************
*** 449,452 ****
--- 458,462 ----
truncate_comp_pages(mapping, 0, 0);
+ #endif
}
***************
*** 568,571 ****
--- 578,583 ----
int (*writepage)(struct page *) = mapping->a_ops->writepage;
+ goto try_again;
+
try_again:
spin_lock(&pagecache_lock);
***************
*** 598,605 ****
--- 610,619 ----
}
spin_unlock(&pagecache_lock);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_dirty_comp_pages(mapping)) {
lookup_all_comp_pages(mapping);
goto try_again;
}
+ #endif
return ret;
}
***************
*** 616,621 ****
--- 630,637 ----
int ret = 0;
+ #ifdef CONFIG_COMP_PAGE_CACHE
try_again:
wait_all_comp_pages(mapping);
+ #endif
spin_lock(&pagecache_lock);
***************
*** 641,646 ****
--- 657,664 ----
}
spin_unlock(&pagecache_lock);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_locked_comp_pages(mapping))
goto try_again;
+ #endif
return ret;
}
***************
*** 738,742 ****
--- 756,762 ----
if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
int error = 0;
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (read_comp_cache(mapping, offset, page))
+ #endif
error = mapping->a_ops->readpage(file, page);
page_cache_release(page);
***************
*** 990,996 ****
--- 1010,1019 ----
struct page *page, * cached_page = NULL;
+ goto repeat;
+
repeat:
spin_lock(&pagecache_lock);
page = __find_lock_page_helper(mapping, offset, *hash);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (!page) {
if (!cached_page) {
***************
*** 1018,1023 ****
}
out:
if (cached_page)
! page_cache_release(cached_page);
spin_unlock(&pagecache_lock);
return page;
--- 1041,1048 ----
}
out:
+ #endif
if (cached_page)
! page_cache_release(cached_page);
!
spin_unlock(&pagecache_lock);
return page;
***************
*** 1048,1053 ****
--- 1073,1080 ----
if (newpage == NULL) {
lru_cache_add(page);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (!read_comp_cache(mapping, index, page))
LockPage(page);
+ #endif
}
else
***************
*** 1055,1058 ****
--- 1082,1086 ----
}
}
+ #ifdef CONFIG_COMP_PAGE_CACHE
/*
* Invalidate compressed cache entry since it may become
***************
*** 1062,1065 ****
--- 1090,1094 ----
if (page)
flush_comp_cache(page);
+ #endif
return page;
}
***************
*** 1582,1587 ****
--- 1611,1618 ----
cached_page = NULL;
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (!read_comp_cache(mapping, index, page))
goto page_ok;
+ #endif
goto readpage;
}
***************
*** 1978,1982 ****
struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
{
! int error;
struct file *file = area->vm_file;
struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
--- 2009,2013 ----
struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
{
! int error, in_comp_cache;
struct file *file = area->vm_file;
struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
***************
*** 2040,2044 ****
* so we need to map a zero page.
*/
! if ((pgoff < size) && !VM_RandomReadHint(area))
error = read_cluster_nonblocking(file, pgoff, size);
else
--- 2071,2082 ----
* so we need to map a zero page.
*/
! in_comp_cache = 0;
! {
! comp_cache_fragment_t * fragment;
! if (!find_comp_page(mapping, pgoff, &fragment))
! in_comp_cache = 1;
! }
!
! if ((pgoff < size) && !VM_RandomReadHint(area) && !in_comp_cache)
error = read_cluster_nonblocking(file, pgoff, size);
else
***************
*** 2861,2865 ****
struct page **hash = page_hash(mapping, index);
struct page *page, *cached_page = NULL;
! int comp_err, err = 0;
repeat:
page = __find_get_page(mapping, index, hash);
--- 2899,2903 ----
struct page **hash = page_hash(mapping, index);
struct page *page, *cached_page = NULL;
! int err = 0;
repeat:
page = __find_get_page(mapping, index, hash);
***************
*** 2875,2887 ****
cached_page = NULL;
! comp_err = read_comp_cache(mapping, index, page);
! switch (comp_err) {
! case -ENOENT:
err = filler(data, page);
! case 0:
! break;
! default:
! BUG();
! }
if (err < 0) {
--- 2913,2921 ----
cached_page = NULL;
! #ifdef CONFIG_COMP_PAGE_CACHE
! if (read_comp_cache(mapping, index, page))
! #endif
err = filler(data, page);
!
if (err < 0) {
***************
*** 2892,2897 ****
--- 2926,2933 ----
if (cached_page)
page_cache_release(cached_page);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (page)
flush_comp_cache(page);
+ #endif
return page;
}
***************
*** 2953,2956 ****
--- 2989,2993 ----
*cached_page = NULL;
}
+ #ifdef CONFIG_COMP_PAGE_CACHE
/*
* we have to invalidate the page since the caller function
***************
*** 2958,2961 ****
--- 2995,2999 ----
*/
flush_comp_cache(page);
+ #endif
return page;
}
Index: memory.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v
retrieving revision 1.26
retrieving revision 1.27
diff -C2 -r1.26 -r1.27
*** memory.c 12 Mar 2002 17:54:19 -0000 1.26
--- memory.c 28 Apr 2002 20:51:34 -0000 1.27
***************
*** 1135,1139 ****
if (!page) {
! swapin_readahead(entry);
page = read_swap_cache_async(entry);
if (!page) {
--- 1135,1141 ----
if (!page) {
! comp_cache_fragment_t * fragment;
! if (find_comp_page(&swapper_space, entry.val, &fragment))
! swapin_readahead(entry);
page = read_swap_cache_async(entry);
if (!page) {
***************
*** 1179,1184 ****
mm->rss++;
pte = mk_pte(page, vma->vm_page_prot);
! if (write_access && can_share_swap_page(page))
pte = pte_mkdirty(pte_mkwrite(pte));
unlock_page(page);
--- 1181,1188 ----
mm->rss++;
pte = mk_pte(page, vma->vm_page_prot);
! if (write_access && can_share_swap_page(page)) {
pte = pte_mkdirty(pte_mkwrite(pte));
+ flush_comp_cache(page);
+ }
unlock_page(page);
Index: mmap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/mmap.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -C2 -r1.7 -r1.8
*** mmap.c 26 Feb 2002 20:59:01 -0000 1.7
--- mmap.c 28 Apr 2002 20:51:34 -0000 1.8
***************
*** 82,87 ****
free += swapper_space.nrpages;
/* Let's count the free space left in compressed cache */
! free += comp_cache_free_space();
/*
--- 82,89 ----
free += swapper_space.nrpages;
+ #ifdef CONFIG_COMP_CACHE
/* Let's count the free space left in compressed cache */
! free += comp_cache_free_space;
! #endif
/*
Index: swap_state.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v
retrieving revision 1.26
retrieving revision 1.27
diff -C2 -r1.26 -r1.27
*** swap_state.c 12 Mar 2002 17:54:19 -0000 1.26
--- swap_state.c 28 Apr 2002 20:51:34 -0000 1.27
***************
*** 226,231 ****
err = add_to_swap_cache(new_page, entry);
if (!err) {
! if (!read_comp_cache(&swapper_space, entry.val, new_page))
return new_page;
/*
--- 226,233 ----
err = add_to_swap_cache(new_page, entry);
if (!err) {
! if (!read_comp_cache(&swapper_space, entry.val, new_page)) {
! add_compressed_cache_miss();
return new_page;
+ }
/*
***************
*** 245,248 ****
--- 247,251 ----
rw_swap_page(READ, new_page);
+ add_swap_miss();
return new_page;
}
Index: vmscan.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v
retrieving revision 1.30
retrieving revision 1.31
diff -C2 -r1.30 -r1.31
*** vmscan.c 28 Feb 2002 19:05:04 -0000 1.30
--- vmscan.c 28 Apr 2002 20:51:34 -0000 1.31
***************
*** 410,414 ****
--- 410,418 ----
writepage = page->mapping->a_ops->writepage;
+ #ifdef CONFIG_COMP_CACHE
+ if (writepage) {
+ #else
if ((gfp_mask & __GFP_FS) && writepage) {
+ #endif
ClearPageDirty(page);
SetPageLaunder(page);
***************
*** 576,583 ****
--- 580,593 ----
unsigned long ratio;
+ /* if compressed cache is enable, we should want to have much
+ * more pressure on swap/page cache than on other caches */
+ if (comp_cache_skip_slab_shrunk())
+ goto skip_slab_cache;
+
nr_pages -= kmem_cache_reap(gfp_mask);
if (nr_pages <= 0)
return 0;
+ skip_slab_cache:
nr_pages = chunk_size;
/* try to keep the active list 2/3 of the size of the cache */
***************
*** 589,592 ****
--- 599,605 ----
return 0;
+ if (comp_cache_skip_dicache_shrunk())
+ return nr_pages;
+
shrink_dcache_memory(priority, gfp_mask);
shrink_icache_memory(priority, gfp_mask);
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-04-28 20:51:37
|
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv2510/include/linux
Modified Files:
comp_cache.h
Log Message:
This version features a first non-functional version of compressed
cache automatic adaptivity to system behaviour. It also has
many changes aiming to fix the performance drop we have in linux
kernel compilation test (check statistics for 0.23pre1 on our web
site). Our analysis isn't complete and more changes are likely to go
since a huge percentage of CPU is still not being used. Anyway, the
current changes improve compressed cache a lot, mainly compressed
cache support for page cache, and it already works much better in that
scenario.
Some detailed changes:
- Configuration options changes. Now we only make compressed cache
option available if SMP is turned off. Page cache support is an
option, that is disabled by default. There's also an option to enable
adaptivity, which is currently non-functional.
- There's no option in kernel configuration to select initial
compressed cache size any longer. It can be selected only by kernel
parameter. This parameter won't be available when adaptivity option is
enabled (since the system will configure compressed cache
automatically). In this case, initial compressed cache size is 10% of
total memory size.
- Functions cleanup: all algorithms functions and related stuff are
now in proc.c file; statistics functions were rewritten and are
simpler.
- New statistics are collected by the system, like a per-cache
analysis (swap and page cache). Statistics is much more complete and
nicer.
- Now there are functions that force the VM to skip writing dirty
buffers, shrinking the slab cache, dcache and icache, since we want the
system to put much more pressure on pages from page and swap cache in
order to have these kind of pages compressed.
- Pages are removed from compressed cache in swapin if the process has
write permissions. Since the pte will be set dirty, the page will
surely be compressed again, so why keep it in the compressed cache?
- If we are swapping in and the page is not present in swap cache, we
no longer read a cluster of pages from swap device if the page is in
compressed cache. This conceptual bug forced us to read many pages
from swap device if the page was compressed in our cache, which is
wrong. The same thing happened when a file entry was faulted in and we
serviced that fault: beforehand we were forcing a cluster read even if
the page was present in compressed cache.
Index: comp_cache.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v
retrieving revision 1.63
retrieving revision 1.64
diff -C2 -r1.63 -r1.64
*** comp_cache.h 26 Mar 2002 12:35:09 -0000 1.63
--- comp_cache.h 28 Apr 2002 20:51:33 -0000 1.64
***************
*** 2,6 ****
* linux/mm/comp_cache.h
*
! * Time-stamp: <2002-03-26 09:18:56 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache.h
*
! * Time-stamp: <2002-04-22 14:55:16 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 29,33 ****
#include <linux/WKcommon.h>
! #define COMP_CACHE_VERSION "0.23pre1"
/* maximum compressed size of a page */
--- 29,33 ----
#include <linux/WKcommon.h>
! #define COMP_CACHE_VERSION "0.23pre2"
/* maximum compressed size of a page */
***************
*** 98,104 ****
--- 98,115 ----
/* adaptivity.c */
+ struct preset_comp_cache {
+ unsigned int size;
+ int profit;
+ };
+
+ extern struct preset_comp_cache * preset_comp_cache;
+ extern int nr_preset_sizes, current_preset_size;
+ extern int latest_uncomp_misses[], latest_miss;
+
+
#ifdef CONFIG_COMP_CACHE
int shrink_comp_cache(comp_cache_t *);
inline void grow_comp_cache(zone_t *, int);
+ void adapt_comp_cache(void);
#else
static inline int shrink_comp_cache(comp_cache_t * comp_page) { return 0; }
***************
*** 161,166 ****
#define CompFragmentClearDirty(fragment) clear_bit(CF_Dirty, &(fragment)->flags)
- #define current_msg ((algorithm == &compression_algorithms[current_algorithm])?" (current algorithm)":"")
-
#define INF 0xffffffff
--- 172,175 ----
***************
*** 196,203 ****
#define mapped(page) (!page->buffers && page_count(page) > 2)
- #define NUM_ALGORITHMS 2
- #define WKDM_IDX 0
- #define WK4X4_IDX 1
-
#define DISCARD_MARK 0.80
--- 205,208 ----
***************
*** 236,240 ****
unsigned long comp_cycles_max, comp_cycles_min;
unsigned long decomp_cycles_max, decomp_cycles_min;
! unsigned int pgccin, pgccout, discarded_pages;
} stats_summary_t;
--- 241,249 ----
unsigned long comp_cycles_max, comp_cycles_min;
unsigned long decomp_cycles_max, decomp_cycles_min;
! unsigned long comp_swap, decomp_swap;
! unsigned long comp_page, decomp_page;
! unsigned long swap_out, page_out;
! unsigned long faultin_swap, faultin_page;
! unsigned long discarded_pages;
} stats_summary_t;
***************
*** 283,290 ****
/* proc.c */
! extern comp_data_t comp_data;
! extern compression_algorithm_t compression_algorithms[NUM_ALGORITHMS];
! extern int current_algorithm;
/* swapin.c */
--- 292,306 ----
/* proc.c */
! #ifdef CONFIG_COMP_CACHE
! void comp_cache_update_page_comp_stats(struct page *);
! void comp_cache_update_writeout_stats(comp_cache_fragment_t *);
! void comp_cache_update_faultin_stats(comp_cache_fragment_t *);
! void set_fragment_algorithm(comp_cache_fragment_t *, unsigned short);
! void decompress(comp_cache_fragment_t *, struct page *);
! int compress(struct page *, void *, unsigned short *);
!
! void __init comp_cache_algorithms_init(void);
! #endif
/* swapin.c */
***************
*** 314,317 ****
--- 330,337 ----
/* main.c */
#ifdef CONFIG_COMP_CACHE
+ inline int comp_cache_skip_buffer_freeing(void);
+ inline int comp_cache_skip_slab_shrunk(void);
+ inline int comp_cache_skip_dicache_shrunk(void);
+
int compress_page(struct page *, int, unsigned int);
void comp_cache_init(void);
***************
*** 319,326 ****
--- 339,362 ----
inline int compress_dirty_page(struct page *, int (*writepage)(struct page *), unsigned int);
inline int compress_clean_page(struct page *, unsigned int);
+
+ extern int nr_swap_misses;
+ extern int nr_compressed_cache_misses;
+ extern unsigned long comp_cache_free_space;
+
+ #define add_swap_miss() (nr_swap_misses++)
+ #define add_compressed_cache_miss() (nr_compressed_cache_misses++)
+
#else
+ static inline int comp_cache_skip_buffer_freeing(void) { return 0; }
+ static inline int comp_cache_skip_slab_shrunk(void) { return 0; }
+ static inline int comp_cache_skip_dicache_shrunk(void) { return 0; }
+
static inline void comp_cache_init(void) {};
static inline int compress_dirty_page(struct page * page, int (*writepage)(struct page *), unsigned int gfp_mask) { return writepage(page); }
static inline int compress_clean_page(struct page * page, unsigned int gfp_mask) { return 0; }
+
+ #define add_swap_miss() (0)
+ #define add_compressed_cache_miss() (0)
+
#endif
***************
*** 379,383 ****
#define vswap_address(entry) (0)
! static inline int comp_cache_swp_duplicate(swp_entry_t entry) {};
static inline int comp_cache_swp_free(swp_entry_t entry) { return 0; }
static inline int comp_cache_swp_count(swp_entry_t entry) { return 0; }
--- 415,419 ----
#define vswap_address(entry) (0)
! static inline int comp_cache_swp_duplicate(swp_entry_t entry) { return 0; };
static inline int comp_cache_swp_free(swp_entry_t entry) { return 0; }
static inline int comp_cache_swp_count(swp_entry_t entry) { return 0; }
***************
*** 499,506 ****
/* enough memory functions */
#ifdef CONFIG_COMP_CACHE
- inline int comp_cache_free_space(void);
extern int FASTCALL(find_comp_page(struct address_space *, unsigned long, comp_cache_fragment_t **));
#else
- static inline int comp_cache_free_space(void) { return 0; }
static inline int find_comp_page(struct address_space * mapping, unsigned long offset, comp_cache_fragment_t ** fragment) { return -ENOENT; }
#endif
--- 535,540 ----
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-03-28 13:27:47
|
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv7375/mm
Modified Files:
filemap.c
Log Message:
- Fixed another bug reported by Paolo Ciarrocchi. In this case, the bug
would result in a deadlock. It was caused by an allocation in
__find_lock_page_helper() function that didn't take into account the
gfp_mask of the caller function (since the gfp_mask wasn't passed as
parameter). Therefore we might call that function from somewhere whose
gfp_mask was GFP_NOFS and even then start writing out fragments, deadlocking
the kernel. To fix it, the allocation was removed from this function. The
only callers were __find_lock_page and find_or_create_page(). The latter one
doesn't need the __find_lock_page_helper() to check compressed cache, since
it already checks it itself. The former now does that by itself too.
Index: filemap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v
retrieving revision 1.22
retrieving revision 1.23
diff -C2 -r1.22 -r1.23
*** filemap.c 21 Mar 2002 19:24:17 -0000 1.22
--- filemap.c 28 Mar 2002 13:13:03 -0000 1.23
***************
*** 737,752 ****
if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
! int comp_err, error = 0;
!
! comp_err = read_comp_cache(mapping, offset, page);
! switch (comp_err) {
! case -ENOENT:
error = mapping->a_ops->readpage(file, page);
- case 0:
- break;
- default:
- BUG();
- }
-
page_cache_release(page);
return error;
--- 737,743 ----
if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
! int error = 0;
! if (read_comp_cache(mapping, offset, page))
error = mapping->a_ops->readpage(file, page);
page_cache_release(page);
return error;
***************
*** 964,968 ****
unsigned long offset, struct page *hash)
{
! struct page *page, * cached_page = NULL;
/*
--- 955,959 ----
unsigned long offset, struct page *hash)
{
! struct page *page;
/*
***************
*** 987,995 ****
}
}
! else {
if (!cached_page) {
comp_cache_fragment_t * fragment;
if (find_comp_page(mapping, offset, &fragment))
goto out;
cached_page = page_cache_alloc(mapping);
goto repeat;
--- 978,1002 ----
}
}
! return page;
! }
!
! /*
! * Same as the above, but lock the page too, verifying that
! * it's still valid once we own it.
! */
! struct page * __find_lock_page (struct address_space *mapping,
! unsigned long offset, struct page **hash)
! {
! struct page *page, * cached_page = NULL;
!
! repeat:
! spin_lock(&pagecache_lock);
! page = __find_lock_page_helper(mapping, offset, *hash);
! if (!page) {
if (!cached_page) {
comp_cache_fragment_t * fragment;
if (find_comp_page(mapping, offset, &fragment))
goto out;
+ spin_unlock(&pagecache_lock);
cached_page = page_cache_alloc(mapping);
goto repeat;
***************
*** 1012,1030 ****
out:
if (cached_page)
! page_cache_release(cached_page);
! return page;
! }
!
! /*
! * Same as the above, but lock the page too, verifying that
! * it's still valid once we own it.
! */
! struct page * __find_lock_page (struct address_space *mapping,
! unsigned long offset, struct page **hash)
! {
! struct page *page;
!
! spin_lock(&pagecache_lock);
! page = __find_lock_page_helper(mapping, offset, *hash);
spin_unlock(&pagecache_lock);
return page;
--- 1019,1023 ----
out:
if (cached_page)
! page_cache_release(cached_page);
spin_unlock(&pagecache_lock);
return page;
|
|
From: Rodrigo S. de C. <rc...@us...> - 2002-03-26 12:35:14
|
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv24950/include/linux Modified Files: comp_cache.h Log Message: - Fixed a bug reported by Paolo Ciarrocchi (kernel BUG at vswap.c:380). The cause of this bug is the variable used for offset in vswap_address. It had been declared as unsigned short, so if we had more than 32767 entries in vswap address table, the offset field of a entry with a higher offset would report the wrong offset, causing corruption (including the one Paolo had). To fix it, offset field hass been changed to unsigned long. Also all the variables used to index when creating or resizing vswap were changed to unsigned long as well. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.62 retrieving revision 1.63 diff -C2 -r1.62 -r1.63 *** comp_cache.h 21 Mar 2002 19:24:17 -0000 1.62 --- comp_cache.h 26 Mar 2002 12:35:09 -0000 1.63 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-03-21 16:10:53 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-03-26 09:18:56 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 329,334 **** struct list_head list; ! unsigned short count; ! unsigned short offset; comp_cache_fragment_t * fragment; --- 329,334 ---- struct list_head list; ! unsigned int count; ! unsigned long offset; comp_cache_fragment_t * fragment; *************** *** 371,375 **** extern int FASTCALL(free_pte_list(struct pte_list *, unsigned long)); ! void vswap_alloc_and_init(struct vswap_address **, int); #else --- 371,375 ---- extern int FASTCALL(free_pte_list(struct pte_list *, unsigned long)); ! void vswap_alloc_and_init(struct vswap_address **, unsigned long); #else |