[lc-checkins] CVS: linux/mm filemap.c,1.34,1.35 swap_state.c,1.37,1.38 vmscan.c,1.41,1.42
From: Rodrigo S. de C. <rc...@us...> - 2002-07-28 15:47:07
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv26313/mm
Modified Files:
filemap.c swap_state.c vmscan.c
Log Message:
Features
o First page cache support for preemptible kernels is implemented.
o Fragments now have a "count" field that stores the number of references
to the fragment, so we don't have to worry about it getting freed in
the middle of an operation. This addresses a very likely source of bugs
(a reference-counting sketch follows this list).
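
A minimal sketch of the reference-counting idea described in the second
item, in 2.4-style kernel C. The structure and helper names
(comp_fragment, fragment_get(), fragment_put()) are hypothetical,
illustration-only stand-ins, not the actual comp_cache code.

	/* Hypothetical sketch: a fragment carries an atomic reference
	 * count, so it cannot be freed while some operation still holds
	 * a reference to it. */
	#include <asm/atomic.h>
	#include <linux/slab.h>

	struct comp_fragment {
		atomic_t count;	/* number of references to this fragment */
		/* ... compressed data, list linkage, etc ... */
	};

	static inline void fragment_get(struct comp_fragment *frag)
	{
		atomic_inc(&frag->count);
	}

	static inline void fragment_put(struct comp_fragment *frag)
	{
		/* free only when the last reference is dropped */
		if (atomic_dec_and_test(&frag->count))
			kfree(frag);
	}

A writeout path would take a reference with fragment_get() before
dropping any locks and release it with fragment_put() when done, so a
concurrent free only takes effect after the operation has finished.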
Bug fixes
o Fix memory accounting for double page sizes. Meminfo was broken for
8K pages.
o truncate_list_comp_pages() could try to truncate fragments that were
on the locked_comp_pages list, which is bogus. Only swap buffers are on
that list, and they are listed there only for wait_comp_pages().
o When writing out fragments, we didn't pay attention to the return
value, so we could end up freeing a fragment (when refilling the swap
buffer) even if writepage() failed. In particular, ramfs, ramdisk and
other memory file systems always fail to write out their pages. Now we
check whether the swap buffer page has been set dirty (writepage()
usually does that after failing to write a page) and, if so, move the
fragment back to the dirty list (and of course do not free it). A
sketch of this check follows the list.
o Fixed a bug that would corrupt the swap buffer list. The variable that
returned the error code could report an error even if a fragment was
found after all, so the caller would back out the writeout operation,
leaving the swap buffer locked on the used list, where it would never
get unlocked.
o Account writeout stats only for pages that have actually been
submitted for IO.
o Fixed a bug that would deadlock a system with comp_cache page cache
support. The lookup_comp_pages() function may be called from the
following code path: __sync_one() -> filemap_fdatasync(). This code
path syncs an inode (and keeps it locked while it is syncing).
However, that very inode can also be in the clear path (the
clear_inode() function, called in the process exit path), which locks
the super block and then waits for the inode if it is locked (as it is
while syncing). Since the allocation path may write pages, which may
need to lock the same super block, it deadlocks, because the super
block is already held by the exit path described above. So we end up
unable to allocate the page (needed to finish this function and unlock
the inode) _and_ the super block is never unlocked, since the inode
never gets unlocked either. The fix is to allocate pages with the
GFP_NOFS mask (see the allocation sketch after this list).
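
As a rough illustration of the writepage() handling described in the
fourth bug fix above, here is a 2.4-style sketch. The function and the
helpers (move_fragment_to_dirty_list(), free_fragment()) are
hypothetical stand-ins for the comp_cache internals; the point is only
that the swap buffer page is checked for the dirty bit after
writepage(), and the fragment is kept when the write failed.

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/* Hypothetical sketch: write out the swap buffer page backing a
	 * fragment, and free the fragment only if the write was really
	 * submitted. Memory file systems (ramfs, ramdisk, ...) fail the
	 * write and usually redirty the page. */
	static int writeout_fragment(struct page *swap_buffer_page,
				     struct comp_fragment *fragment)
	{
		struct address_space *mapping = swap_buffer_page->mapping;
		int err;

		err = mapping->a_ops->writepage(swap_buffer_page);

		if (err || PageDirty(swap_buffer_page)) {
			/* writepage() failed (it normally redirties the
			 * page in that case): keep the fragment */
			move_fragment_to_dirty_list(fragment);
			return err;
		}

		/* the page was actually submitted to IO: account the
		 * writeout and drop the fragment */
		free_fragment(fragment);
		return 0;
	}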
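
And a sketch of the GFP_NOFS part of the last fix. Allocating with
GFP_NOFS keeps the allocator from recursing into filesystem writeout
(and thus from taking a super block lock that the caller, or the exit
path described above, may already hold). The function name below is
hypothetical; the real change is in the allocation done for
lookup_comp_pages().

	#include <linux/mm.h>

	/* Hypothetical sketch: allocate the page needed by the lookup
	 * path with GFP_NOFS so a low-memory allocation cannot re-enter
	 * the filesystem and deadlock on a locked super block. */
	static struct page *comp_cache_alloc_page(void)
	{
		return alloc_page(GFP_NOFS);
	}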
Cleanups
o Some functions were renamed.
o Compression algorithms: removed unnecessary data structures that were
being allocated, made some structures statically allocated within the
algorithms, and converted some previously static data to kmalloc().
o Removed /proc/sys/vm/comp_cache/actual_size; it doesn't make sense
with on-demand resizing.
Others
o The compressed cache now resizes only on demand.
Index: filemap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v
retrieving revision 1.34
retrieving revision 1.35
diff -C2 -r1.34 -r1.35
*** filemap.c 16 Jul 2002 18:41:55 -0000 1.34
--- filemap.c 28 Jul 2002 15:47:04 -0000 1.35
***************
*** 183,190 ****
struct page * page;
- #ifdef CONFIG_COMP_PAGE_CACHE
- invalidate_comp_pages(inode->i_mapping);
- #endif
-
head = &inode->i_mapping->clean_pages;
--- 183,186 ----
***************
*** 193,196 ****
--- 189,196 ----
curr = head->next;
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ invalidate_comp_pages(inode->i_mapping);
+ #endif
+
while (curr != head) {
page = list_entry(curr, struct page, list);
***************
*** 341,350 ****
unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial);
} while (unlocked);
- /* Traversed all three lists without dropping the lock */
- spin_unlock(&pagecache_lock);
-
#ifdef CONFIG_COMP_PAGE_CACHE
truncate_comp_pages(mapping, start, partial);
#endif
}
--- 341,349 ----
unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial);
} while (unlocked);
#ifdef CONFIG_COMP_PAGE_CACHE
truncate_comp_pages(mapping, start, partial);
#endif
+ /* Traversed all three lists without dropping the lock */
+ spin_unlock(&pagecache_lock);
}
***************
*** 449,457 ****
unlocked |= invalidate_list_pages2(&mapping->locked_pages);
} while (unlocked);
- spin_unlock(&pagecache_lock);
#ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_dirty_comp_pages(mapping)) {
! lookup_all_comp_pages(mapping);
goto try_again;
}
--- 448,455 ----
unlocked |= invalidate_list_pages2(&mapping->locked_pages);
} while (unlocked);
#ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_dirty_comp_pages(mapping)) {
! lookup_comp_pages(mapping);
goto try_again;
}
***************
*** 459,462 ****
--- 457,461 ----
truncate_comp_pages(mapping, 0, 0);
#endif
+ spin_unlock(&pagecache_lock);
}
***************
*** 609,619 ****
spin_lock(&pagecache_lock);
}
- spin_unlock(&pagecache_lock);
#ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_dirty_comp_pages(mapping)) {
! lookup_all_comp_pages(mapping);
goto try_again;
}
#endif
return ret;
}
--- 608,618 ----
spin_lock(&pagecache_lock);
}
#ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_dirty_comp_pages(mapping)) {
! lookup_comp_pages(mapping);
goto try_again;
}
#endif
+ spin_unlock(&pagecache_lock);
return ret;
}
***************
*** 630,640 ****
int ret = 0;
#ifdef CONFIG_COMP_PAGE_CACHE
try_again:
! wait_all_comp_pages(mapping);
#endif
- spin_lock(&pagecache_lock);
-
while (!list_empty(&mapping->locked_pages)) {
struct page *page = list_entry(mapping->locked_pages.next, struct page, list);
--- 629,639 ----
int ret = 0;
+ spin_lock(&pagecache_lock);
+
#ifdef CONFIG_COMP_PAGE_CACHE
try_again:
! wait_comp_pages(mapping);
#endif
while (!list_empty(&mapping->locked_pages)) {
struct page *page = list_entry(mapping->locked_pages.next, struct page, list);
***************
*** 656,664 ****
spin_lock(&pagecache_lock);
}
- spin_unlock(&pagecache_lock);
#ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_locked_comp_pages(mapping))
goto try_again;
#endif
return ret;
}
--- 655,663 ----
spin_lock(&pagecache_lock);
}
#ifdef CONFIG_COMP_PAGE_CACHE
if (there_are_locked_comp_pages(mapping))
goto try_again;
#endif
+ spin_unlock(&pagecache_lock);
return ret;
}
***************
*** 758,762 ****
if (readahead) {
! struct page * tmp_page = __find_page_nolock(mapping, offset, *hash);
if (!tmp_page && in_comp_cache(mapping, offset)) {
page_cache_release(page);
--- 757,765 ----
if (readahead) {
! struct page * tmp_page;
! spin_lock(&pagecache_lock);
! tmp_page = __find_page_nolock(mapping, offset, *hash);
! spin_unlock(&pagecache_lock);
!
if (!tmp_page && in_comp_cache(mapping, offset)) {
page_cache_release(page);
***************
*** 768,774 ****
int error = 0;
#ifdef CONFIG_COMP_PAGE_CACHE
! if (readahead || read_comp_cache(mapping, offset, page))
#endif
! error = mapping->a_ops->readpage(file, page);
page_cache_release(page);
return error;
--- 771,783 ----
int error = 0;
#ifdef CONFIG_COMP_PAGE_CACHE
! if (!readahead) {
! if (!read_comp_cache(mapping, offset, page)) {
! UnlockPage(page);
! page_cache_release(page);
! return error;
! }
! }
#endif
! error = mapping->a_ops->readpage(file, page);
page_cache_release(page);
return error;
***************
*** 886,889 ****
--- 895,900 ----
BUG();
+ spin_lock(&pagecache_lock);
+
/* hack to avoid problems in the function to
* add pages to the hash queue, since it does
***************
*** 898,901 ****
--- 909,914 ----
page->buffers = buffers;
+
+ spin_unlock(&pagecache_lock);
}
***************
*** 920,938 ****
}
! struct page * find_and_dirty_page(struct address_space *mapping,
! unsigned long offset, struct page **hash)
{
! struct page *page;
! /*
! * We scan the hash list read-only. Addition to and removal from
! * the hash-list needs a held write-lock.
! */
! spin_lock(&pagecache_lock);
! page = __find_page_nolock(mapping, offset, *hash);
! if (page)
! __set_page_dirty(page);
! spin_unlock(&pagecache_lock);
! return page;
}
#endif
--- 933,955 ----
}
! /* caller (lookup_comp_page()) holds the pagecache_lock */
! int find_and_dirty_page(struct page * new_page, struct address_space *mapping, unsigned long offset, struct page **hash)
{
! struct page *page = NULL;
! /*
! * We scan the hash list read-only. Addition to and removal from
! * the hash-list needs a held write-lock.
! */
! page = __find_page_nolock(mapping, offset, *hash);
! if (page) {
! __set_page_dirty(page);
! return 1;
! }
! __add_to_page_cache(new_page, mapping, offset, hash);
! spin_unlock(&pagecache_lock);
! lru_cache_add(new_page);
! spin_lock(&pagecache_lock);
! return 0;
}
#endif
***************
*** 1026,1053 ****
spin_lock(&pagecache_lock);
page = __find_lock_page_helper(mapping, offset, *hash);
#ifdef CONFIG_COMP_PAGE_CACHE
! if (!page && in_comp_cache(mapping, offset)) {
if (!cached_page) {
- spin_unlock(&pagecache_lock);
cached_page = page_cache_alloc(mapping);
goto repeat;
}
! if (add_to_page_cache_unique(cached_page, mapping, offset, hash))
! goto out;
if (read_comp_cache(mapping, offset, cached_page)) {
- __lru_cache_del(cached_page);
- __remove_inode_page(cached_page);
UnlockPage(cached_page);
! page_cache_release(cached_page);
goto out;
}
!
! if (TryLockPage(cached_page))
! BUG();
!
page = cached_page;
cached_page = NULL;
}
out:
--- 1043,1085 ----
spin_lock(&pagecache_lock);
page = __find_lock_page_helper(mapping, offset, *hash);
+ spin_unlock(&pagecache_lock);
#ifdef CONFIG_COMP_PAGE_CACHE
! if (page)
! goto out;
! if (in_comp_cache(mapping, offset)) {
if (!cached_page) {
cached_page = page_cache_alloc(mapping);
goto repeat;
}
! LockPage(cached_page);
!
! spin_lock(&pagecache_lock);
+ /* the page has been added to page cache after we
+ * released the pagecache_lock spinlock */
+ if (__find_page_nolock(mapping, offset, *hash)) {
+ spin_unlock(&pagecache_lock);
+ goto repeat;
+ }
+
+ /* there is no page in the page cache, and we hold the
+ * pagecache_lock, so we can decompress the
+ * fragment. In case the fragment has been removed
+ * from the compressed cache between the in_comp_cache()
+ * above and this read_comp_cache(), we won't have
+ * problems, since we hold the pagecache_lock */
if (read_comp_cache(mapping, offset, cached_page)) {
UnlockPage(cached_page);
! spin_unlock(&pagecache_lock);
goto out;
}
!
page = cached_page;
cached_page = NULL;
+
+ __add_to_page_cache(page, mapping, offset, hash);
+ spin_unlock(&pagecache_lock);
+ lru_cache_add(page);
}
out:
***************
*** 1056,1060 ****
page_cache_release(cached_page);
- spin_unlock(&pagecache_lock);
return page;
}
--- 1088,1091 ----
***************
*** 1085,1090 ****
lru_cache_add(page);
#ifdef CONFIG_COMP_PAGE_CACHE
! if (!read_comp_cache(mapping, index, page) && TryLockPage(page))
! BUG();
#endif
}
--- 1116,1120 ----
lru_cache_add(page);
#ifdef CONFIG_COMP_PAGE_CACHE
! read_comp_cache(mapping, index, page);
#endif
}
***************
*** 1618,1623 ****
#ifdef CONFIG_COMP_PAGE_CACHE
! if (!read_comp_cache(mapping, index, page))
goto page_ok;
#endif
goto readpage;
--- 1648,1655 ----
#ifdef CONFIG_COMP_PAGE_CACHE
! if (!read_comp_cache(mapping, index, page)) {
! UnlockPage(page);
goto page_ok;
+ }
#endif
goto readpage;
***************
*** 2913,2919 ****
#ifdef CONFIG_COMP_PAGE_CACHE
! if (read_comp_cache(mapping, index, page))
#endif
! err = filler(data, page);
--- 2945,2954 ----
#ifdef CONFIG_COMP_PAGE_CACHE
! if (!read_comp_cache(mapping, index, page)) {
! UnlockPage(page);
! goto out;
! }
#endif
! err = filler(data, page);
***************
*** 2926,2931 ****
page_cache_release(cached_page);
#ifdef CONFIG_COMP_PAGE_CACHE
! if (page)
! flush_comp_cache(page);
#endif
return page;
--- 2961,2966 ----
page_cache_release(cached_page);
#ifdef CONFIG_COMP_PAGE_CACHE
! out:
! flush_comp_cache(page);
#endif
return page;
Index: swap_state.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v
retrieving revision 1.37
retrieving revision 1.38
diff -C2 -r1.37 -r1.38
*** swap_state.c 16 Jul 2002 18:41:55 -0000 1.37
--- swap_state.c 28 Jul 2002 15:47:04 -0000 1.38
***************
*** 234,242 ****
err = add_to_swap_cache(new_page, entry);
if (!err) {
! if (readahead || read_comp_cache(&swapper_space, entry.val, new_page)) {
! if (vswap_address(entry))
! BUG();
! rw_swap_page(READ, new_page);
}
return new_page;
}
--- 234,247 ----
err = add_to_swap_cache(new_page, entry);
if (!err) {
! if (!readahead) {
! if (!read_comp_cache(&swapper_space, entry.val, new_page)) {
! UnlockPage(new_page);
! return new_page;
! }
}
+ if (vswap_address(entry))
+ BUG();
+
+ rw_swap_page(READ, new_page);
return new_page;
}
Index: vmscan.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v
retrieving revision 1.41
retrieving revision 1.42
diff -C2 -r1.41 -r1.42
*** vmscan.c 16 Jul 2002 18:41:55 -0000 1.41
--- vmscan.c 28 Jul 2002 15:47:04 -0000 1.42
***************
*** 416,419 ****
--- 416,420 ----
#endif
{
+ int compressed;
ClearPageDirty(page);
SetPageLaunder(page);
***************
*** 421,425 ****
spin_unlock(&pagemap_lru_lock);
! compress_dirty_page(page, writepage, gfp_mask, priority);
page_cache_release(page);
--- 422,426 ----
spin_unlock(&pagemap_lru_lock);
! compressed = compress_dirty_page(page, writepage, gfp_mask, priority);
page_cache_release(page);
***************
*** 427,431 ****
spin_lock(&pagemap_lru_lock);
! if (!PageCompCache(page))
continue;
}
--- 428,432 ----
spin_lock(&pagemap_lru_lock);
! if (!compressed)
continue;
}
***************
*** 521,524 ****
--- 522,526 ----
page_cache_get(page);
+ spin_unlock(&pagecache_lock);
spin_unlock(&pagemap_lru_lock);
***************
*** 528,533 ****
spin_lock(&pagemap_lru_lock);
! if (!compressed)
continue;
}
#endif
--- 530,539 ----
spin_lock(&pagemap_lru_lock);
! if (!compressed) {
! UnlockPage(page);
continue;
+ }
+
+ spin_lock(&pagecache_lock);
}
#endif
***************
*** 629,638 ****
do {
nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages);
! if (nr_pages <= 0) {
! #if defined(CONFIG_COMP_CACHE) && !defined(CONFIG_COMP_DEMAND_RESIZE)
! grow_comp_cache(SWAP_CLUSTER_MAX/2);
! #endif
return 1;
- }
} while (--priority);
--- 635,640 ----
do {
nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages);
! if (nr_pages <= 0)
return 1;
} while (--priority);