Thread: [lc-checkins] CVS: linux/mm filemap.c,1.23,1.24 memory.c,1.26,1.27 mmap.c,1.7,1.8 swap_state.c,1.26,
From: Rodrigo S. de C. <rc...@us...> - 2002-04-28 20:51:37
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv2510/mm
Modified Files:
filemap.c memory.c mmap.c swap_state.c vmscan.c
Log Message:
This version features a first, non-functional version of compressed
cache adaptivity, which will automatically adjust the cache to system
behaviour. It also has many changes aimed at fixing the performance
drop we see in the Linux kernel compilation test (check the statistics
for 0.23pre1 on our web site). Our analysis isn't complete and more
changes are likely to come, since a huge percentage of CPU time is
still going unused. Even so, the current changes improve compressed
cache a lot, mainly its support for the page cache, and it already
works much better in that scenario.
Some detailed changes:
- Configuration option changes. The compressed cache option is now
available only if SMP is turned off. Page cache support is a separate
option, disabled by default. There's also an option to enable
adaptivity, which is currently non-functional.
- The kernel configuration no longer has an option to select the
initial compressed cache size; it can be set only by a kernel boot
parameter. This parameter is not available when the adaptivity option
is enabled (since the system will size the compressed cache
automatically); in that case, the initial compressed cache size is
10% of total memory (see the first sketch after this list).
- Function cleanup: all algorithm functions and related code are now
in the proc.c file; the statistics functions were rewritten and are
simpler.
- New statistics are collected by the system, such as a per-cache
analysis (swap and page cache). The statistics are much more complete
and nicer (see the counter sketch after this list).
- There are now functions that force the VM to skip writing dirty
buffers and shrinking the slab cache, dcache and icache, since we want
the system to put much more pressure on pages from the page and swap
caches in order to have those kinds of pages compressed (see the
skip-predicate sketch after this list).
- Pages are removed from the compressed cache at swapin if the process
has write permission. Since the pte will be set dirty, the page will
surely be compressed again, so why keep a stale copy in the compressed
cache? (See the flush sketch after this list.)
- When swapping in a page that is not present in the swap cache, we no
longer read a cluster of pages from the swap device if the page is in
the compressed cache. This conceptual bug forced us to read many pages
from the swap device even when the target page was compressed in our
cache, which is wrong. The same thing happened when a file page was
faulted in and we serviced the fault: we were forcing a cluster read
even if the page was present in the compressed cache.
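For the boot-parameter sizing above, here is a minimal sketch of how
such a parameter could be wired up in a 2.4 kernel. The parameter
name, the variable, and CONFIG_COMP_ADAPTIVITY are hypothetical; only
the 10%-of-memory default comes from this log message.

  #include <linux/init.h>
  #include <linux/kernel.h>
  #include <linux/mm.h>

  /* hypothetical: initial compressed cache size, in pages */
  static unsigned long comp_cache_size;

  /* hypothetical boot parameter, e.g. "comp_cache_size=4096" */
  static int __init comp_cache_size_setup(char *str)
  {
          comp_cache_size = simple_strtoul(str, NULL, 0);
          return 1;
  }
  __setup("comp_cache_size=", comp_cache_size_setup);

  static void __init comp_cache_size_init(void)
  {
  #ifdef CONFIG_COMP_ADAPTIVITY
          /* adaptivity resizes the cache itself: ignore the boot
           * parameter and start at 10% of total memory */
          comp_cache_size = num_physpages / 10;
  #else
          if (!comp_cache_size)
                  comp_cache_size = num_physpages / 10;
  #endif
  }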
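For the per-cache statistics item, a minimal sketch of what the
counters might look like. The helper names add_compressed_cache_miss()
and add_swap_miss() come from the swap_state.c hunk below; judging
from their call sites they count where a swap-cache miss was
ultimately served from. The struct layout is an assumption.

  /* hypothetical counters: where swap-cache misses were served from */
  struct comp_cache_stats {
          unsigned long from_comp_cache;  /* served from compressed cache */
          unsigned long from_swap_device; /* had to read the swap device */
  };

  static struct comp_cache_stats swap_cache_stats;

  void add_compressed_cache_miss(void)
  {
          swap_cache_stats.from_comp_cache++;
  }

  void add_swap_miss(void)
  {
          swap_cache_stats.from_swap_device++;
  }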
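The vmscan.c hunk below shows where the skip predicates hook into the
reclaim path, but not their implementation. One plausible sketch
(entirely an assumption; the real policy may differ) is a simple rate
limiter, so that most reclaim passes skip slab shrinking and keep the
pressure on the page and swap caches instead:

  /* hypothetical: allow the slab cache to be shrunk only on every
   * fourth attempt while compressed cache is active */
  static int slab_skip_count;

  int comp_cache_skip_slab_shrunk(void)
  {
          return (++slab_skip_count % 4) != 0;
  }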
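For the swapin write-access item, the memory.c hunk below adds a
flush_comp_cache() call once the pte is made writable and dirty. A
plausible sketch of that helper, built from the
PageTestandClearCompCache()/invalidate_comp_cache() pair that the
filemap.c hunk uses (the real implementation may differ):

  #include <linux/mm.h>

  /* drop the page's compressed copy: once the pte is dirtied the
   * compressed fragment is stale and only wastes cache space */
  void flush_comp_cache(struct page *page)
  {
          if (PageTestandClearCompCache(page))
                  invalidate_comp_cache(page->mapping, page->index);
  }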
Index: filemap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v
retrieving revision 1.23
retrieving revision 1.24
diff -C2 -r1.23 -r1.24
*** filemap.c 28 Mar 2002 13:13:03 -0000 1.23
--- filemap.c 28 Apr 2002 20:51:34 -0000 1.24
***************
*** 162,167 ****
--- 162,169 ----
if (mapping->host)
mark_inode_dirty_pages(mapping->host);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (PageTestandClearCompCache(page))
invalidate_comp_cache(mapping, page->index);
+ #endif
}
}
***************
*** 181,185 ****
--- 183,189 ----
struct page * page;
+ #ifdef CONFIG_COMP_PAGE_CACHE
invalidate_comp_pages(inode->i_mapping);
+ #endif
head = &inode->i_mapping->clean_pages;
***************
*** 340,344 ****
--- 344,350 ----
spin_unlock(&pagecache_lock);
+ #ifdef CONFIG_COMP_PAGE_CACHE
truncate_comp_pages(mapping, start, partial);
+ #endif
}
***************
*** 434,438 ****
int unlocked;
! try_again:
spin_lock(&pagecache_lock);
do {
--- 440,446 ----
int unlocked;
! goto try_again;
!
! try_again:
spin_lock(&pagecache_lock);
do {
***************
*** 443,446 ****
--- 451,455 ----
spin_unlock(&pagecache_lock);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_dirty_comp_pages(mapping)) {
lookup_all_comp_pages(mapping);
***************
*** 449,452 ****
--- 458,462 ----
truncate_comp_pages(mapping, 0, 0);
+ #endif
}
***************
*** 568,571 ****
--- 578,583 ----
int (*writepage)(struct page *) = mapping->a_ops->writepage;
+ goto try_again;
+
try_again:
spin_lock(&pagecache_lock);
***************
*** 598,605 ****
--- 610,619 ----
}
spin_unlock(&pagecache_lock);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_dirty_comp_pages(mapping)) {
lookup_all_comp_pages(mapping);
goto try_again;
}
+ #endif
return ret;
}
***************
*** 616,621 ****
--- 630,637 ----
int ret = 0;
+ #ifdef CONFIG_COMP_PAGE_CACHE
try_again:
wait_all_comp_pages(mapping);
+ #endif
spin_lock(&pagecache_lock);
***************
*** 641,646 ****
--- 657,664 ----
}
spin_unlock(&pagecache_lock);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_locked_comp_pages(mapping))
goto try_again;
+ #endif
return ret;
}
***************
*** 738,742 ****
--- 756,762 ----
if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
int error = 0;
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (read_comp_cache(mapping, offset, page))
+ #endif
error = mapping->a_ops->readpage(file, page);
page_cache_release(page);
***************
*** 990,996 ****
--- 1010,1019 ----
struct page *page, * cached_page = NULL;
+ goto repeat;
+
repeat:
spin_lock(&pagecache_lock);
page = __find_lock_page_helper(mapping, offset, *hash);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (!page) {
if (!cached_page) {
***************
*** 1018,1023 ****
}
out:
if (cached_page)
! page_cache_release(cached_page);
spin_unlock(&pagecache_lock);
return page;
--- 1041,1048 ----
}
out:
+ #endif
if (cached_page)
! page_cache_release(cached_page);
!
spin_unlock(&pagecache_lock);
return page;
***************
*** 1048,1053 ****
--- 1073,1080 ----
if (newpage == NULL) {
lru_cache_add(page);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (!read_comp_cache(mapping, index, page))
LockPage(page);
+ #endif
}
else
***************
*** 1055,1058 ****
--- 1082,1086 ----
}
}
+ #ifdef CONFIG_COMP_PAGE_CACHE
/*
* Invalidate compressed cache entry since it may become
***************
*** 1062,1065 ****
--- 1090,1094 ----
if (page)
flush_comp_cache(page);
+ #endif
return page;
}
***************
*** 1582,1587 ****
--- 1611,1618 ----
cached_page = NULL;
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (!read_comp_cache(mapping, index, page))
goto page_ok;
+ #endif
goto readpage;
}
***************
*** 1978,1982 ****
struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
{
! int error;
struct file *file = area->vm_file;
struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
--- 2009,2013 ----
struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
{
! int error, in_comp_cache;
struct file *file = area->vm_file;
struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
***************
*** 2040,2044 ****
* so we need to map a zero page.
*/
! if ((pgoff < size) && !VM_RandomReadHint(area))
error = read_cluster_nonblocking(file, pgoff, size);
else
--- 2071,2082 ----
* so we need to map a zero page.
*/
! in_comp_cache = 0;
! {
! comp_cache_fragment_t * fragment;
! if (!find_comp_page(mapping, pgoff, &fragment))
! in_comp_cache = 1;
! }
!
! if ((pgoff < size) && !VM_RandomReadHint(area) && !in_comp_cache)
error = read_cluster_nonblocking(file, pgoff, size);
else
***************
*** 2861,2865 ****
struct page **hash = page_hash(mapping, index);
struct page *page, *cached_page = NULL;
! int comp_err, err = 0;
repeat:
page = __find_get_page(mapping, index, hash);
--- 2899,2903 ----
struct page **hash = page_hash(mapping, index);
struct page *page, *cached_page = NULL;
! int err = 0;
repeat:
page = __find_get_page(mapping, index, hash);
***************
*** 2875,2887 ****
cached_page = NULL;
! comp_err = read_comp_cache(mapping, index, page);
! switch (comp_err) {
! case -ENOENT:
err = filler(data, page);
! case 0:
! break;
! default:
! BUG();
! }
if (err < 0) {
--- 2913,2921 ----
cached_page = NULL;
! #ifdef CONFIG_COMP_PAGE_CACHE
! if (read_comp_cache(mapping, index, page))
! #endif
err = filler(data, page);
!
if (err < 0) {
***************
*** 2892,2897 ****
--- 2926,2933 ----
if (cached_page)
page_cache_release(cached_page);
+ #ifdef CONFIG_COMP_PAGE_CACHE
if (page)
flush_comp_cache(page);
+ #endif
return page;
}
***************
*** 2953,2956 ****
--- 2989,2993 ----
*cached_page = NULL;
}
+ #ifdef CONFIG_COMP_PAGE_CACHE
/*
* we have to invalidate the page since the caller function
***************
*** 2958,2961 ****
--- 2995,2999 ----
*/
flush_comp_cache(page);
+ #endif
return page;
}
Index: memory.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v
retrieving revision 1.26
retrieving revision 1.27
diff -C2 -r1.26 -r1.27
*** memory.c 12 Mar 2002 17:54:19 -0000 1.26
--- memory.c 28 Apr 2002 20:51:34 -0000 1.27
***************
*** 1135,1139 ****
if (!page) {
! swapin_readahead(entry);
page = read_swap_cache_async(entry);
if (!page) {
--- 1135,1141 ----
if (!page) {
! comp_cache_fragment_t * fragment;
! if (find_comp_page(&swapper_space, entry.val, &fragment))
! swapin_readahead(entry);
page = read_swap_cache_async(entry);
if (!page) {
***************
*** 1179,1184 ****
mm->rss++;
pte = mk_pte(page, vma->vm_page_prot);
! if (write_access && can_share_swap_page(page))
pte = pte_mkdirty(pte_mkwrite(pte));
unlock_page(page);
--- 1181,1188 ----
mm->rss++;
pte = mk_pte(page, vma->vm_page_prot);
! if (write_access && can_share_swap_page(page)) {
pte = pte_mkdirty(pte_mkwrite(pte));
+ flush_comp_cache(page);
+ }
unlock_page(page);
Index: mmap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/mmap.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -C2 -r1.7 -r1.8
*** mmap.c 26 Feb 2002 20:59:01 -0000 1.7
--- mmap.c 28 Apr 2002 20:51:34 -0000 1.8
***************
*** 82,87 ****
free += swapper_space.nrpages;
/* Let's count the free space left in compressed cache */
! free += comp_cache_free_space();
/*
--- 82,89 ----
free += swapper_space.nrpages;
+ #ifdef CONFIG_COMP_CACHE
/* Let's count the free space left in compressed cache */
! free += comp_cache_free_space;
! #endif
/*
Index: swap_state.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v
retrieving revision 1.26
retrieving revision 1.27
diff -C2 -r1.26 -r1.27
*** swap_state.c 12 Mar 2002 17:54:19 -0000 1.26
--- swap_state.c 28 Apr 2002 20:51:34 -0000 1.27
***************
*** 226,231 ****
err = add_to_swap_cache(new_page, entry);
if (!err) {
! if (!read_comp_cache(&swapper_space, entry.val, new_page))
return new_page;
/*
--- 226,233 ----
err = add_to_swap_cache(new_page, entry);
if (!err) {
! if (!read_comp_cache(&swapper_space, entry.val, new_page)) {
! add_compressed_cache_miss();
return new_page;
+ }
/*
***************
*** 245,248 ****
--- 247,251 ----
rw_swap_page(READ, new_page);
+ add_swap_miss();
return new_page;
}
Index: vmscan.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v
retrieving revision 1.30
retrieving revision 1.31
diff -C2 -r1.30 -r1.31
*** vmscan.c 28 Feb 2002 19:05:04 -0000 1.30
--- vmscan.c 28 Apr 2002 20:51:34 -0000 1.31
***************
*** 410,414 ****
--- 410,418 ----
writepage = page->mapping->a_ops->writepage;
+ #ifdef CONFIG_COMP_CACHE
+ if (writepage) {
+ #else
if ((gfp_mask & __GFP_FS) && writepage) {
+ #endif
ClearPageDirty(page);
SetPageLaunder(page);
***************
*** 576,583 ****
--- 580,593 ----
unsigned long ratio;
+ /* if compressed cache is enabled, we want to have much
+ * more pressure on swap/page cache than on other caches */
+ if (comp_cache_skip_slab_shrunk())
+ goto skip_slab_cache;
+
nr_pages -= kmem_cache_reap(gfp_mask);
if (nr_pages <= 0)
return 0;
+ skip_slab_cache:
nr_pages = chunk_size;
/* try to keep the active list 2/3 of the size of the cache */
***************
*** 589,592 ****
--- 599,605 ----
return 0;
+ if (comp_cache_skip_dicache_shrunk())
+ return nr_pages;
+
shrink_dcache_memory(priority, gfp_mask);
shrink_icache_memory(priority, gfp_mask);
|