[lc-checkins] CVS: linux/include/linux comp_cache.h,1.63,1.64
From: Rodrigo S. de C. <rc...@us...> - 2002-04-28 20:51:37
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv2510/include/linux
Modified Files:
comp_cache.h
Log Message:
This version features a first, non-functional version of compressed
cache adaptivity to system behaviour. It also has many changes aimed at
fixing the performance drop we see in the Linux kernel compilation test
(check the statistics for 0.23pre1 on our web site). Our analysis isn't
complete and more changes are likely to come, since a large percentage
of CPU time is still left unused. Anyway, the current changes improve
the compressed cache a lot, mainly its support for the page cache, and
it already works much better in that scenario.
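For reference, the adaptivity code is built around the preset_comp_cache
table declared in the header diff below: a set of candidate cache sizes,
each with an estimated profit. The following is only a conceptual sketch
of what adapt_comp_cache() is meant to do once it becomes functional; the
selection policy and the resize_comp_cache() helper are assumptions, not
actual code:

    /*
     * Conceptual sketch only -- the adaptivity code in this release is
     * non-functional.  It uses the preset table declared in comp_cache.h;
     * resize_comp_cache() is a hypothetical helper.
     */
    #include <linux/comp_cache.h>

    void adapt_comp_cache(void)
    {
            int i, best = current_preset_size;

            /* pick the preset size whose estimated profit is highest */
            for (i = 0; i < nr_preset_sizes; i++)
                    if (preset_comp_cache[i].profit > preset_comp_cache[best].profit)
                            best = i;

            if (best == current_preset_size)
                    return;

            current_preset_size = best;
            /* grow or shrink the cache towards the chosen size */
            resize_comp_cache(preset_comp_cache[best].size);
    }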
Some detailed changes:
- Configuration option changes. The compressed cache option is now
available only if SMP is turned off. Page cache support is an option
that is disabled by default. There's also an option to enable
adaptivity, which is currently non-functional.
- There's no longer a kernel configuration option to select the initial
compressed cache size; it can be set only through a kernel parameter.
This parameter isn't available when the adaptivity option is enabled
(since the system will configure the compressed cache automatically); in
that case the initial compressed cache size is 10% of total memory (see
the sizing sketch after this change list).
- Function cleanup: all compression algorithm functions and related code
are now in proc.c; the statistics functions were rewritten and are
simpler.
- New statistics are collected by the system, such as a per-cache
breakdown (swap cache and page cache). The statistics output is much
more complete and nicer (see the per-cache accounting sketch after this
change list).
- There are now functions that make the VM skip writing out dirty
buffers and skip shrinking the slab cache, dcache and icache, since we
want the system to put much more pressure on pages from the page and
swap caches so that those kinds of pages get compressed (see the
shrink-path sketch after this change list).
- Pages are removed from the compressed cache on swap-in if the process
has write permission. Since the pte will be set dirty, the page will
surely be compressed again, so why keep it in the compressed cache? (See
the swap-in sketch after this change list.)
- If we are swapping in and the page is not present in the swap cache,
we no longer read a cluster of pages from the swap device when the page
is in the compressed cache. This conceptual bug forced us to read many
pages from the swap device even though the faulting page was already
compressed in our cache, which is wrong. The same thing happened when a
file entry was faulted in and we serviced that fault: we used to force a
cluster read even if the page was present in the compressed cache. (The
swap-in sketch after this change list also covers this case.)
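A rough illustration of the default sizing mentioned above: with
adaptivity enabled the initial size is derived from total memory. Only
the 10% figure comes from the change list; the helper name below is made
up for the example (num_physpages is the stock 2.4 count of physical
page frames):

    /* Sketch only: default initial compressed cache size when adaptivity
     * is enabled.  comp_cache_default_size() is a hypothetical name. */
    #include <linux/mm.h>

    static unsigned long comp_cache_default_size(void)
    {
            /* 10% of total memory, expressed in page frames */
            return num_physpages / 10;
    }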
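The per-cache statistics roughly boil down to counting compression and
decompression events separately for swap cache and page cache pages, as
in the sketch below. comp_cache_update_page_comp_stats() and the
comp_swap/comp_page fields come from the diff; the "stats" variable is
an assumption standing in for whatever stats_summary_t instance proc.c
actually keeps:

    /* Sketch of the per-cache accounting; "stats" is a hypothetical name
     * for the stats_summary_t instance kept by proc.c. */
    #include <linux/mm.h>
    #include <linux/comp_cache.h>

    void comp_cache_update_page_comp_stats(struct page * page)
    {
            extern stats_summary_t stats;   /* hypothetical */

            if (PageSwapCache(page))
                    stats.comp_swap++;      /* compressing a swap cache page */
            else
                    stats.comp_page++;      /* compressing a page cache page */
    }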
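The skip functions mentioned above (comp_cache_skip_buffer_freeing(),
comp_cache_skip_slab_shrunk() and comp_cache_skip_dicache_shrunk(),
declared in the diff) are meant to be consulted from the VM shrinking
paths. The sketch below is a simplification; the actual call sites in
mm/vmscan.c and fs/buffer.c differ:

    /* Simplified sketch of how the skip helpers are meant to be used in
     * the shrink path; real call sites and surrounding logic differ. */
    #include <linux/comp_cache.h>
    #include <linux/slab.h>
    #include <linux/dcache.h>
    #include <linux/fs.h>

    static void shrink_other_caches_sketch(int priority, unsigned int gfp_mask)
    {
            /* leave slab, dcache and icache alone so that memory pressure
             * falls on page and swap cache pages, which can be compressed */
            if (!comp_cache_skip_slab_shrunk())
                    kmem_cache_reap(gfp_mask);

            if (!comp_cache_skip_dicache_shrunk()) {
                    shrink_dcache_memory(priority, gfp_mask);
                    shrink_icache_memory(priority, gfp_mask);
            }

            /* comp_cache_skip_buffer_freeing() plays the same role around
             * dirty buffer write-out */
    }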
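Finally, the two swap-in changes above amount to something like the
sketch below: a writable fault drops the fragment right after
decompression, and cluster read-ahead is only issued when the faulting
page is not in the compressed cache (the page cache case is analogous,
with the file's mapping instead of swapper_space). find_comp_page(),
decompress() and comp_cache_fragment_t are from the diff;
comp_cache_free_fragment() and the exact placement inside do_swap_page()
are assumptions:

    /* Sketch of the reworked swap-in path; helpers marked hypothetical
     * are made up for the example, and the real callers in mm/memory.c
     * differ. */
    #include <linux/comp_cache.h>
    #include <linux/swap.h>
    #include <linux/mm.h>

    static struct page * comp_swapin_sketch(swp_entry_t entry, int write_access)
    {
            comp_cache_fragment_t * fragment;
            struct page * page;

            if (find_comp_page(&swapper_space, entry.val, &fragment) == -ENOENT) {
                    /* not compressed: cluster read-ahead still makes sense
                     * here, but only here */
                    swapin_readahead(entry);
                    return read_swap_cache_async(entry);
            }

            page = alloc_page(GFP_HIGHUSER);        /* simplified */
            decompress(fragment, page);

            /* a write fault will dirty the pte, so the page is bound to be
             * compressed again later; keeping the stale fragment is useless */
            if (write_access)
                    comp_cache_free_fragment(fragment);     /* hypothetical */

            return page;
    }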
Index: comp_cache.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v
retrieving revision 1.63
retrieving revision 1.64
diff -C2 -r1.63 -r1.64
*** comp_cache.h 26 Mar 2002 12:35:09 -0000 1.63
--- comp_cache.h 28 Apr 2002 20:51:33 -0000 1.64
***************
*** 2,6 ****
* linux/mm/comp_cache.h
*
! * Time-stamp: <2002-03-26 09:18:56 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache.h
*
! * Time-stamp: <2002-04-22 14:55:16 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 29,33 ****
#include <linux/WKcommon.h>
! #define COMP_CACHE_VERSION "0.23pre1"
/* maximum compressed size of a page */
--- 29,33 ----
#include <linux/WKcommon.h>
! #define COMP_CACHE_VERSION "0.23pre2"
/* maximum compressed size of a page */
***************
*** 98,104 ****
--- 98,115 ----
/* adaptivity.c */
+ struct preset_comp_cache {
+ unsigned int size;
+ int profit;
+ };
+
+ extern struct preset_comp_cache * preset_comp_cache;
+ extern int nr_preset_sizes, current_preset_size;
+ extern int latest_uncomp_misses[], latest_miss;
+
+
#ifdef CONFIG_COMP_CACHE
int shrink_comp_cache(comp_cache_t *);
inline void grow_comp_cache(zone_t *, int);
+ void adapt_comp_cache(void);
#else
static inline int shrink_comp_cache(comp_cache_t * comp_page) { return 0; }
***************
*** 161,166 ****
#define CompFragmentClearDirty(fragment) clear_bit(CF_Dirty, &(fragment)->flags)
- #define current_msg ((algorithm == &compression_algorithms[current_algorithm])?" (current algorithm)":"")
-
#define INF 0xffffffff
--- 172,175 ----
***************
*** 196,203 ****
#define mapped(page) (!page->buffers && page_count(page) > 2)
- #define NUM_ALGORITHMS 2
- #define WKDM_IDX 0
- #define WK4X4_IDX 1
-
#define DISCARD_MARK 0.80
--- 205,208 ----
***************
*** 236,240 ****
unsigned long comp_cycles_max, comp_cycles_min;
unsigned long decomp_cycles_max, decomp_cycles_min;
! unsigned int pgccin, pgccout, discarded_pages;
} stats_summary_t;
--- 241,249 ----
unsigned long comp_cycles_max, comp_cycles_min;
unsigned long decomp_cycles_max, decomp_cycles_min;
! unsigned long comp_swap, decomp_swap;
! unsigned long comp_page, decomp_page;
! unsigned long swap_out, page_out;
! unsigned long faultin_swap, faultin_page;
! unsigned long discarded_pages;
} stats_summary_t;
***************
*** 283,290 ****
/* proc.c */
! extern comp_data_t comp_data;
! extern compression_algorithm_t compression_algorithms[NUM_ALGORITHMS];
! extern int current_algorithm;
/* swapin.c */
--- 292,306 ----
/* proc.c */
! #ifdef CONFIG_COMP_CACHE
! void comp_cache_update_page_comp_stats(struct page *);
! void comp_cache_update_writeout_stats(comp_cache_fragment_t *);
! void comp_cache_update_faultin_stats(comp_cache_fragment_t *);
! void set_fragment_algorithm(comp_cache_fragment_t *, unsigned short);
! void decompress(comp_cache_fragment_t *, struct page *);
! int compress(struct page *, void *, unsigned short *);
!
! void __init comp_cache_algorithms_init(void);
! #endif
/* swapin.c */
***************
*** 314,317 ****
--- 330,337 ----
/* main.c */
#ifdef CONFIG_COMP_CACHE
+ inline int comp_cache_skip_buffer_freeing(void);
+ inline int comp_cache_skip_slab_shrunk(void);
+ inline int comp_cache_skip_dicache_shrunk(void);
+
int compress_page(struct page *, int, unsigned int);
void comp_cache_init(void);
***************
*** 319,326 ****
--- 339,362 ----
inline int compress_dirty_page(struct page *, int (*writepage)(struct page *), unsigned int);
inline int compress_clean_page(struct page *, unsigned int);
+
+ extern int nr_swap_misses;
+ extern int nr_compressed_cache_misses;
+ extern unsigned long comp_cache_free_space;
+
+ #define add_swap_miss() (nr_swap_misses++)
+ #define add_compressed_cache_miss() (nr_compressed_cache_misses++)
+
#else
+ static inline int comp_cache_skip_buffer_freeing(void) { return 0; }
+ static inline int comp_cache_skip_slab_shrunk(void) { return 0; }
+ static inline int comp_cache_skip_dicache_shrunk(void) { return 0; }
+
static inline void comp_cache_init(void) {};
static inline int compress_dirty_page(struct page * page, int (*writepage)(struct page *), unsigned int gfp_mask) { return writepage(page); }
static inline int compress_clean_page(struct page * page, unsigned int gfp_mask) { return 0; }
+
+ #define add_swap_miss() (0)
+ #define add_compressed_cache_miss() (0)
+
#endif
***************
*** 379,383 ****
#define vswap_address(entry) (0)
! static inline int comp_cache_swp_duplicate(swp_entry_t entry) {};
static inline int comp_cache_swp_free(swp_entry_t entry) { return 0; }
static inline int comp_cache_swp_count(swp_entry_t entry) { return 0; }
--- 415,419 ----
#define vswap_address(entry) (0)
! static inline int comp_cache_swp_duplicate(swp_entry_t entry) { return 0; };
static inline int comp_cache_swp_free(swp_entry_t entry) { return 0; }
static inline int comp_cache_swp_count(swp_entry_t entry) { return 0; }
***************
*** 499,506 ****
/* enough memory functions */
#ifdef CONFIG_COMP_CACHE
- inline int comp_cache_free_space(void);
extern int FASTCALL(find_comp_page(struct address_space *, unsigned long, comp_cache_fragment_t **));
#else
- static inline int comp_cache_free_space(void) { return 0; }
static inline int find_comp_page(struct address_space * mapping, unsigned long offset, comp_cache_fragment_t ** fragment) { return -ENOENT; }
#endif
--- 535,540 ----