[lc-checkins] CVS: linux/mm/comp_cache adaptivity.c,1.22,1.23 aux.c,1.30,1.31 main.c,1.44,1.45 swapo
Status: Beta
Brought to you by:
nitin_sf
|
From: Rodrigo S. de C. <rc...@us...> - 2002-06-18 12:47:26
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv12868/mm/comp_cache
Modified Files:
adaptivity.c aux.c main.c swapout.c vswap.c
Log Message:
Bug fixes
o Fixed compilation error when compressed cache is disabled
o Fixed bug that would store pages with buffers even if the page cache
support was disabled
o Fixed bug that would not account for comp pages with zero fragments in
/proc/comp_cache_hist. They were displayed in the total column, but not in
the zero fragments column.
o Fixed bug in comp_cache_fix_watermarks() that would set
zone_num_comp_pages to a bogus value of num_comp_page.
Other
o Added code to remove pages with buffers stored in the compressed
cache from LRU lists. This code is within "#if 0" clauses.
o Allocate vswap table only when it is first used.
o Reduce compressed cache minimum size from 10 to 5%.
Index: adaptivity.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v
retrieving revision 1.22
retrieving revision 1.23
diff -C2 -r1.22 -r1.23
*** adaptivity.c 11 Jun 2002 13:20:49 -0000 1.22
--- adaptivity.c 18 Jun 2002 12:47:21 -0000 1.23
***************
*** 2,6 ****
* linux/mm/comp_cache/adaptivity.c
*
! * Time-stamp: <2002-06-10 13:32:33 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/adaptivity.c
*
! * Time-stamp: <2002-06-17 17:42:23 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 510,513 ****
--- 510,516 ----
static inline int
vswap_needs_to_shrink(void) {
+ if (!vswap_address)
+ return 0;
+
if (vswap_current_num_entries <= NUM_VSWAP_ENTRIES)
return 0;
***************
*** 631,634 ****
--- 634,640 ----
static inline int
vswap_needs_to_grow(void) {
+ if (!vswap_address)
+ return 0;
+
/* using vswap_last_used instead of vswap_current_num_entries
* forces us to grow the cache even if we started shrinking
Index: aux.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v
retrieving revision 1.30
retrieving revision 1.31
diff -C2 -r1.30 -r1.31
*** aux.c 13 Jun 2002 20:18:32 -0000 1.30
--- aux.c 18 Jun 2002 12:47:21 -0000 1.31
***************
*** 2,6 ****
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-06-13 17:15:16 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-06-17 16:14:31 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 232,235 ****
--- 232,236 ----
if (!comp_page->page)
num_fragments[6]++;
+ num_fragments[0]++;
break;
case 1:
Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.44
retrieving revision 1.45
diff -C2 -r1.44 -r1.45
*** main.c 13 Jun 2002 20:18:33 -0000 1.44
--- main.c 18 Jun 2002 12:47:21 -0000 1.45
***************
*** 2,6 ****
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-06-13 17:08:28 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-06-17 17:47:11 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 147,150 ****
--- 147,151 ----
}
+ #ifdef CONFIG_COMP_PAGE_CACHE
void
steal_page_from_comp_cache(struct page * page, struct page * new_page)
***************
*** 179,183 ****
}
! comp_cache_free_locked(fragment);
PageClearMappedCompCache(old_page);
--- 180,189 ----
}
! #if 0
! if (page != new_page)
! lru_cache_add(page);
! #endif
!
! comp_cache_free_locked(fragment);
PageClearMappedCompCache(old_page);
***************
*** 198,203 ****
BUG();
if (PageMappedCompCache(*page))
! return 0;
if (page_count(*page) != 3)
--- 204,211 ----
BUG();
+ /* if mapped comp cache pages aren't removed from LRU queues,
+ * then here we should return 1, otherwise BUG() */
if (PageMappedCompCache(*page))
! return 1;
if (page_count(*page) != 3)
***************
*** 252,258 ****
--- 260,270 ----
ret = 1;
out:
+ #if 0
+ lru_cache_del(comp_page->page);
+ #endif
comp_cache_update_page_stats(comp_page->page, 0);
return ret;
}
+ #endif
extern void __init comp_cache_hash_init(void);
***************
*** 281,285 ****
max_num_comp_pages = num_physpages * 0.5;
! min_num_comp_pages = num_physpages * 0.1;
#ifndef CONFIG_COMP_ADAPTIVITY
--- 293,297 ----
max_num_comp_pages = num_physpages * 0.5;
! min_num_comp_pages = num_physpages * 0.05;
#ifndef CONFIG_COMP_ADAPTIVITY
***************
*** 300,304 ****
#ifdef CONFIG_COMP_ADAPTIVITY
printk("Compressed Cache: adaptivity\n");
! preset_comp_cache[0].size = num_physpages * 0.10;
preset_comp_cache[1].size = num_physpages * 0.23;
preset_comp_cache[2].size = num_physpages * 0.37;
--- 312,316 ----
#ifdef CONFIG_COMP_ADAPTIVITY
printk("Compressed Cache: adaptivity\n");
! preset_comp_cache[0].size = num_physpages * 0.05;
preset_comp_cache[1].size = num_physpages * 0.23;
preset_comp_cache[2].size = num_physpages * 0.37;
***************
*** 328,334 ****
comp_cache_swp_buffer_init();
- /* virtual swap address */
comp_cache_vswap_init();
!
/* initialize each comp cache entry */
for (i = 0; i < num_comp_pages; i++) {
--- 340,345 ----
comp_cache_swp_buffer_init();
comp_cache_vswap_init();
!
/* initialize each comp cache entry */
for (i = 0; i < num_comp_pages; i++) {
Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.49
retrieving revision 1.50
diff -C2 -r1.49 -r1.50
*** swapout.c 13 Jun 2002 20:18:34 -0000 1.49
--- swapout.c 18 Jun 2002 12:47:21 -0000 1.50
***************
*** 2,6 ****
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-06-13 17:02:45 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-06-17 17:39:26 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 201,206 ****
}
- extern unsigned short * last_page_size;
- extern unsigned short last_page;
extern struct address_space swapper_space;
--- 201,204 ----
Index: vswap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v
retrieving revision 1.30
retrieving revision 1.31
diff -C2 -r1.30 -r1.31
*** vswap.c 15 May 2002 18:05:36 -0000 1.30
--- vswap.c 18 Jun 2002 12:47:21 -0000 1.31
***************
*** 2,6 ****
* linux/mm/comp_cache/vswap.c
*
! * Time-stamp: <2002-05-15 09:36:47 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/vswap.c
*
! * Time-stamp: <2002-06-17 17:52:54 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 23,27 ****
kmem_cache_t * vswap_cachep;
! struct vswap_address ** vswap_address;
struct list_head vswap_address_free_head;
struct list_head vswap_address_used_head;
--- 23,27 ----
kmem_cache_t * vswap_cachep;
! struct vswap_address ** vswap_address = NULL;
struct list_head vswap_address_free_head;
struct list_head vswap_address_used_head;
***************
*** 54,57 ****
--- 54,78 ----
unsigned short last_page = 0;
+ static void
+ comp_cache_vswap_alloc(void)
+ {
+ unsigned long i;
+
+ vswap_cachep = kmem_cache_create("comp_cache_vswap", sizeof(struct vswap_address), 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+ vswap_address = (struct vswap_address **) vmalloc(NUM_VSWAP_ENTRIES * sizeof(struct vswap_address*));
+
+ if (!vswap_address)
+ panic("comp_cache_vswap_init(): cannot allocate vswap_address");
+
+ vswap_current_num_entries = NUM_VSWAP_ENTRIES;
+ vswap_last_used = NUM_VSWAP_ENTRIES - 1;
+ vswap_num_used_entries = 0;
+ vswap_num_swap_cache = 0;
+
+ for (i = 0; i < NUM_VSWAP_ENTRIES; i++)
+ vswap_alloc_and_init(vswap_address, i);
+ }
+
static inline int
comp_cache_mean_size(void) {
***************
*** 138,141 ****
--- 159,165 ----
entry.val = 0;
+ if (!vswap_address)
+ comp_cache_vswap_alloc();
+
if (!comp_cache_available_vswap())
return entry;
***************
*** 659,682 ****
INIT_LIST_HEAD(&(vswap_address_free_head));
INIT_LIST_HEAD(&(vswap_address_used_head));
-
- vswap_cachep = kmem_cache_create("comp_cache_vswap", sizeof(struct vswap_address), 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
-
- vswap_address = (struct vswap_address **) vmalloc(NUM_VSWAP_ENTRIES * sizeof(struct vswap_address*));
-
- if (!vswap_address)
- panic("comp_cache_vswap_init(): cannot allocate vswap_address");
-
- vswap_current_num_entries = NUM_VSWAP_ENTRIES;
- vswap_last_used = NUM_VSWAP_ENTRIES - 1;
- vswap_num_used_entries = 0;
- vswap_num_swap_cache = 0;
-
- for (i = 0; i < NUM_VSWAP_ENTRIES; i++)
- vswap_alloc_and_init(vswap_address, i);
comp_cache_freeable_space = PAGE_SIZE * num_comp_pages;
last_page_size = (unsigned short *) vmalloc(NUM_MEAN_PAGES * sizeof(unsigned short));
!
for (i = 0; i < NUM_MEAN_PAGES; i++)
last_page_size[i] = PAGE_SIZE/2;
--- 683,691 ----
INIT_LIST_HEAD(&(vswap_address_free_head));
INIT_LIST_HEAD(&(vswap_address_used_head));
comp_cache_freeable_space = PAGE_SIZE * num_comp_pages;
last_page_size = (unsigned short *) vmalloc(NUM_MEAN_PAGES * sizeof(unsigned short));
!
for (i = 0; i < NUM_MEAN_PAGES; i++)
last_page_size[i] = PAGE_SIZE/2;
|