Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv7375/mm
Modified Files:
filemap.c
Log Message:
- Fixed another bug reported by Paolo Ciarrocchi. In this case, the bug
would result in a deadlock. It was caused by an allocation in
__find_lock_page_helper() function that didn't take into account the
gfp_mask of the caller function (since the gfp_mask wasn't passed as
parameter). Therefore that function could be called from a context whose
gfp_mask was GFP_NOFS and still start writing out fragments, deadlocking
the kernel. To fix it, the allocation was removed from this function. The
only callers were __find_lock_page() and find_or_create_page(). The latter one
doesn't need __find_lock_page_helper() to check the compressed cache, since
it already checks it itself. The former now does that by itself too.
Index: filemap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v
retrieving revision 1.22
retrieving revision 1.23
diff -C2 -r1.22 -r1.23
*** filemap.c 21 Mar 2002 19:24:17 -0000 1.22
--- filemap.c 28 Mar 2002 13:13:03 -0000 1.23
***************
*** 737,752 ****
if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
! int comp_err, error = 0;
!
! comp_err = read_comp_cache(mapping, offset, page);
! switch (comp_err) {
! case -ENOENT:
error = mapping->a_ops->readpage(file, page);
- case 0:
- break;
- default:
- BUG();
- }
-
page_cache_release(page);
return error;
--- 737,743 ----
if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
! int error = 0;
! if (read_comp_cache(mapping, offset, page))
error = mapping->a_ops->readpage(file, page);
page_cache_release(page);
return error;
***************
*** 964,968 ****
unsigned long offset, struct page *hash)
{
! struct page *page, * cached_page = NULL;
/*
--- 955,959 ----
unsigned long offset, struct page *hash)
{
! struct page *page;
/*
***************
*** 987,995 ****
}
}
! else {
if (!cached_page) {
comp_cache_fragment_t * fragment;
if (find_comp_page(mapping, offset, &fragment))
goto out;
cached_page = page_cache_alloc(mapping);
goto repeat;
--- 978,1002 ----
}
}
! return page;
! }
!
! /*
! * Same as the above, but lock the page too, verifying that
! * it's still valid once we own it.
! */
! struct page * __find_lock_page (struct address_space *mapping,
! unsigned long offset, struct page **hash)
! {
! struct page *page, * cached_page = NULL;
!
! repeat:
! spin_lock(&pagecache_lock);
! page = __find_lock_page_helper(mapping, offset, *hash);
! if (!page) {
if (!cached_page) {
comp_cache_fragment_t * fragment;
if (find_comp_page(mapping, offset, &fragment))
goto out;
+ spin_unlock(&pagecache_lock);
cached_page = page_cache_alloc(mapping);
goto repeat;
***************
*** 1012,1030 ****
out:
if (cached_page)
! page_cache_release(cached_page);
! return page;
! }
!
! /*
! * Same as the above, but lock the page too, verifying that
! * it's still valid once we own it.
! */
! struct page * __find_lock_page (struct address_space *mapping,
! unsigned long offset, struct page **hash)
! {
! struct page *page;
!
! spin_lock(&pagecache_lock);
! page = __find_lock_page_helper(mapping, offset, *hash);
spin_unlock(&pagecache_lock);
return page;
--- 1019,1023 ----
out:
if (cached_page)
! page_cache_release(cached_page);
spin_unlock(&pagecache_lock);
return page;
|