[lc-checkins] CVS: linux/include/linux comp_cache.h,1.74,1.75
Status: Beta
Brought to you by:
nitin_sf
|
From: Rodrigo S. de C. <rc...@us...> - 2002-06-18 12:47:25
|
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv12868/include/linux
Modified Files:
comp_cache.h
Log Message:
Bug fixes
o Fixed compilation error when compressed cache is disabled
o Fixed bug that would store pages with buffers even if page cache
support was disabled
o Fixed bug that would not account for the comp pages with zero fragments in
/proc/comp_cache_hist. They were displayed in the total column, but not in
the zero fragments column.
o Fixed bug in comp_cache_fix_watermarks() that would set
zone_num_comp_pages to a bogus value of num_comp_page.
Other
o Added code to remove pages with buffers stored in the compressed
cache from LRU lists. This code is within "#if 0" clauses.
o Allocate vswap table only when it is first used.
o Reduce compressed cache minimum size from 10% to 5%.
Index: comp_cache.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v
retrieving revision 1.74
retrieving revision 1.75
diff -C2 -r1.74 -r1.75
*** comp_cache.h 13 Jun 2002 20:18:29 -0000 1.74
--- comp_cache.h 18 Jun 2002 12:47:21 -0000 1.75
***************
*** 2,6 ****
* linux/mm/comp_cache.h
*
! * Time-stamp: <2002-06-13 10:36:18 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache.h
*
! * Time-stamp: <2002-06-17 17:39:43 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 326,330 ****
#define there_are_locked_comp_pages(mapping) (!list_empty(&(mapping)->locked_comp_pages))
#else
! static inline int read_comp_cache(struct address_space * mapping, unsigned long offset, struct page * page) { return -ENOENT; }
static inline int invalidate_comp_cache(struct address_space * mapping, unsigned long offset) { return -ENOENT; }
static inline int flush_comp_cache(struct page * page) { return -ENOENT; }
--- 326,330 ----
#define there_are_locked_comp_pages(mapping) (!list_empty(&(mapping)->locked_comp_pages))
#else
! static inline int read_comp_cache(struct address_space * mapping, unsigned long offset, struct page * page, int access) { return -ENOENT; }
static inline int invalidate_comp_cache(struct address_space * mapping, unsigned long offset) { return -ENOENT; }
static inline int flush_comp_cache(struct page * page) { return -ENOENT; }
***************
*** 345,351 ****
inline int compress_clean_page(struct page *, unsigned int);
- void steal_page_from_comp_cache(struct page *, struct page *);
- int comp_cache_try_to_release_page(struct page **, int);
-
extern int nr_swap_misses;
extern int nr_compressed_cache_misses;
--- 345,348 ----
***************
*** 362,368 ****
static inline int compress_clean_page(struct page * page, unsigned int gfp_mask) { return 0; }
- static inline void steal_page_from_comp_cache(struct page * page, struct page * new_page) {};
- static inline int comp_cache_try_to_release_page(struct page ** page, int gfp_mask) { return try_to_release_page(*page, gfp_mask); }
-
#define add_swap_miss() (0)
#define add_compressed_cache_miss() (0)
--- 359,362 ----
***************
*** 370,373 ****
--- 364,380 ----
#endif
+ #ifdef CONFIG_COMP_PAGE_CACHE
+
+ int comp_cache_try_to_release_page(struct page **, int);
+ void steal_page_from_comp_cache(struct page *, struct page *);
+
+ #else
+
+ static inline int comp_cache_try_to_release_page(struct page ** page, int gfp_mask) { return try_to_release_page(*page, gfp_mask); }
+ static inline void steal_page_from_comp_cache(struct page * page, struct page * new_page) {};
+
+ #endif
+
+
/* vswap.c */
struct vswap_address {
***************
*** 395,398 ****
--- 402,408 ----
extern unsigned long vswap_num_swap_cache;
extern unsigned int vswap_last_used;
+
+ extern unsigned short * last_page_size;
+ extern unsigned short last_page;
#define COMP_CACHE_SWP_TYPE MAX_SWAPFILES
|