[lc-checkins] CVS: linux/mm filemap.c,1.36,1.37 vmscan.c,1.42,1.43
From: Rodrigo S. de C. <rc...@us...> - 2002-07-31 12:31:09
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv23025/mm

Modified Files:
	filemap.c vmscan.c 
Log Message:

Bug fixes

o Fixed "kernel BUG at inode.c:518". That bug happened when, after
  truncating all the pages from an inode, there were still pending
  pages in that mapping. That scenario could occur if a fragment
  happened to be waiting to lock its comp_page in order to be
  effectively freed. In that case, the fragment got removed from the
  comp cache data structures, but not from the mapping data
  structures, while waiting for its fragment->comp_page lock, so it
  was still taken as being in the mapping (as a clean mapped page,
  actually) in spite of having a zero counter. That was fixed by
  removing the fragment from the mapping structures too (a toy model
  of this invariant follows the log message).

o Fixed "oops in ext2_check_page()" bug. That bug could happen
  because a page, after being compressed in compress_clean_page(),
  could be removed from the page cache (in shrink_cache()) even if it
  was not freeable (!is_page_cache_freeable()). So ext2_check_page(),
  which assumes that the page cannot be removed since it holds a
  reference on it, dereferences the page->mapping pointer directly
  and oopses, because that pointer is set to NULL when the page is
  removed from the page cache. The fix was very simple: we check,
  with the pagecache lock held, whether the page is still freeable
  after compressing it. If it is not, the page is not removed from
  the page cache and is kept on the inactive list (see the second
  sketch below).

Cleanup

o Cleanup in lookup_comp_pages()
o find_and_dirty_page() renamed to find_or_add_page()
o __find_lock_page() no longer reads pages from the comp cache if the
  page is not found in the page cache.
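To make the first fix concrete, here is a minimal compilable
user-space sketch of the invariant it restores. The fragment and
comp_page naming follows the log message, but the structures, the
freeing functions and the clear_inode()-style check are simplified
stand-ins for illustration, not the real linuxcompressed code:

#include <stdio.h>

/* One compressed-cache fragment; the two flags model membership in
 * the two sets of data structures the log message talks about. */
struct fragment {
	int in_comp_cache;	/* linked into the comp cache structures */
	int in_mapping;		/* linked into the mapping (page cache) */
};

/* Deferred free: the fragment's comp_page is locked, so the fragment
 * is only delisted here.  The bug: only the comp cache side was
 * delisted, so truncate later found a stale zero-count entry. */
static void free_fragment_buggy(struct fragment *f)
{
	f->in_comp_cache = 0;
}

static void free_fragment_fixed(struct fragment *f)
{
	f->in_comp_cache = 0;
	f->in_mapping = 0;	/* the fix: delist from the mapping too */
}

/* Stand-in for the truncate-time sanity check that fired as
 * "kernel BUG at inode.c:518": no pages may remain in the mapping. */
static void clear_inode_check(const struct fragment *f, const char *tag)
{
	if (f->in_mapping)
		printf("%s: kernel BUG at inode.c:518!\n", tag);
	else
		printf("%s: mapping clean, inode can be cleared\n", tag);
}

int main(void)
{
	struct fragment a = { 1, 1 }, b = { 1, 1 };

	free_fragment_buggy(&a);
	clear_inode_check(&a, "buggy");	/* trips the BUG() */

	free_fragment_fixed(&b);
	clear_inode_check(&b, "fixed");	/* clean */
	return 0;
}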
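The second fix boils down to re-testing the page's reference count
under the pagecache_lock before taking the page out of the page
cache: compressing a page does not drop other holders' references,
so the test must be repeated after compress_clean_page(). Below is a
compilable user-space sketch of that pattern; the 2.4-style freeable
test is quoted from memory and the lock is only modelled by comments,
so treat it as an illustration rather than the exact shrink_cache()
code:

#include <stdio.h>

struct page {
	int count;		/* references: the page cache holds one */
	int has_buffers;
};

/* 2.4-style test: is only the page cache reference (plus the one
 * implied by buffers) left on the page? */
static int is_page_cache_freeable(const struct page *p)
{
	return p->count - !!p->has_buffers == 1;
}

/* What shrink_cache() does after compress_clean_page(): re-check
 * under the (modelled) pagecache_lock before removing the page. */
static int try_remove_from_page_cache(struct page *p)
{
	/* spin_lock(&pagecache_lock) in the real code */
	if (!is_page_cache_freeable(p)) {
		/* someone (e.g. ext2_check_page()) still holds a
		 * reference and will touch page->mapping: keep the
		 * page on the inactive list instead */
		return 0;
	}
	p->count--;	/* drop the page cache reference */
	/* page->mapping would be set to NULL in the real removal */
	return 1;
}

int main(void)
{
	struct page p = { .count = 2, .has_buffers = 0 };

	/* count == 2: page cache + ext2_check_page()'s reference */
	printf("removed: %d (expected 0)\n", try_remove_from_page_cache(&p));

	p.count = 1;	/* only the page cache reference remains */
	printf("removed: %d (expected 1)\n", try_remove_from_page_cache(&p));
	return 0;
}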
Index: filemap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v
retrieving revision 1.36
retrieving revision 1.37
diff -C2 -r1.36 -r1.37
*** filemap.c	28 Jul 2002 20:48:32 -0000	1.36
--- filemap.c	31 Jul 2002 12:31:05 -0000	1.37
***************
*** 882,958 ****
  }
  
- #ifdef CONFIG_COMP_CACHE
- /*
-  * Change the swap cache page index in the page cache data structure,
-  * specifically the hash queue. It's used by shrink_vswap() when *
-  * compacting the vswap entries.
-  */
- void change_index_hash_queue(struct page * page, unsigned long new_index) {
- 	struct buffer_head * buffers;
- 	struct page ** p;
- 
- 	if (!PageLocked(page))
- 		BUG();
- 
- 	spin_lock(&pagecache_lock);
- 
- 	/* hack to avoid problems in the function to
- 	 * add pages to the hash queue, since it does
- 	 * not like pages with buffers */
- 	buffers = page->buffers;
- 	page->buffers = NULL;
- 
- 	remove_page_from_hash_queue(page);
- 	page->index = new_index;
- 	p = page_hash(page->mapping, new_index);
- 	add_page_to_hash_queue(page, p);
- 
- 	page->buffers = buffers;
- 
- 	spin_unlock(&pagecache_lock);
- }
- 
- /*
-  * The same function as below, but doesn't invalidate the comp cache
-  */
- void __set_page_dirty(struct page *page)
- {
- 	if (!test_and_set_bit(PG_dirty, &page->flags)) {
- 		struct address_space *mapping = page->mapping;
- 
- 		if (mapping) {
- 			spin_lock(&pagecache_lock);
- 			list_del(&page->list);
- 			list_add(&page->list, &mapping->dirty_pages);
- 			spin_unlock(&pagecache_lock);
- 
- 			if (mapping->host)
- 				mark_inode_dirty_pages(mapping->host);
- 		}
- 	}
- }
- 
- /* caller (lookup_comp_page()) holds the pagecache_lock */
- int find_and_dirty_page(struct page * new_page, struct address_space *mapping, unsigned long offset, struct page **hash)
- {
- 	struct page *page = NULL;
- 
- 	/*
- 	 * We scan the hash list read-only. Addition to and removal from
- 	 * the hash-list needs a held write-lock.
- 	 */
- 	page = __find_page_nolock(mapping, offset, *hash);
- 	if (page) {
- 		spin_unlock(&pagecache_lock);
- 		__set_page_dirty(page);
- 		return 1;
- 	}
- 	__add_to_page_cache(new_page, mapping, offset, hash);
- 	spin_unlock(&pagecache_lock);
- 	lru_cache_add(new_page);
- 	return 0;
- }
- #endif
- 
  /*
   * a rather lightweight function, finding and getting a reference to a
--- 882,885 ----
***************
*** 1029,1091 ****
  }
  
  /*
!  * Same as the above, but lock the page too, verifying that
!  * it's still valid once we own it.
   */
! struct page * __find_lock_page (struct address_space *mapping,
! 				unsigned long offset, struct page **hash)
! {
! 	struct page *page, * cached_page = NULL;
  
- 	goto repeat;
- 
- repeat:
  	spin_lock(&pagecache_lock);
! 	page = __find_lock_page_helper(mapping, offset, *hash);
  	spin_unlock(&pagecache_lock);
  
! #ifdef CONFIG_COMP_PAGE_CACHE
! 	if (page)
! 		goto out;
! 	if (in_comp_cache(mapping, offset)) {
! 		if (!cached_page) {
! 			cached_page = page_cache_alloc(mapping);
! 			goto repeat;
! 		}
! 		LockPage(cached_page);
! 
! 		spin_lock(&pagecache_lock);
! 		/* the page has been added to page cache after we
! 		 * released the pagecache_lock spinlock */
! 		if (__find_page_nolock(mapping, offset, *hash)) {
  			spin_unlock(&pagecache_lock);
- 			goto repeat;
- 		}
! 		/* there are no page in page cache, and we hold the
! 		 * pagecache_lock, so we can decompress the
! 		 * fragment. In the case the fragment has been removed
! 		 * from the compressed cache between in_comp_cache()
! 		 * above and this read_comp_cache(), we won't have
! 		 * problems since we hold pagecache_lock */
! 		if (read_comp_cache(mapping, offset, cached_page)) {
! 			UnlockPage(cached_page);
! 			spin_unlock(&pagecache_lock);
! 			goto out;
  		}
! 
! 		page = cached_page;
! 		cached_page = NULL;
! 		__add_to_page_cache(page, mapping, offset, hash);
  		spin_unlock(&pagecache_lock);
! 		lru_cache_add(page);
  	}
! out:
  #endif
- 	if (cached_page)
- 		page_cache_release(cached_page);
  	return page;
  }
--- 956,1048 ----
  }
  
+ #ifdef CONFIG_COMP_CACHE
  /*
!  * Change the swap cache page index in the page cache data structure,
!  * specifically the hash queue. It's used by shrink_vswap() when *
!  * compacting the vswap entries.
   */
! void change_index_hash_queue(struct page * page, unsigned long new_index) {
! 	struct buffer_head * buffers;
! 	struct page ** p;
! 
! 	if (!PageLocked(page))
! 		BUG();
  
  	spin_lock(&pagecache_lock);
! 
! 	/* hack to avoid problems in the function to
! 	 * add pages to the hash queue, since it does
! 	 * not like pages with buffers */
! 	buffers = page->buffers;
! 	page->buffers = NULL;
! 
! 	remove_page_from_hash_queue(page);
! 	page->index = new_index;
! 	p = page_hash(page->mapping, new_index);
! 	add_page_to_hash_queue(page, p);
! 
! 	page->buffers = buffers;
! 
  	spin_unlock(&pagecache_lock);
! }
! 
  /*
!  * The same function as below, but doesn't invalidate the comp cache
!  */
! void __set_page_dirty(struct page *page)
! {
! 	if (!test_and_set_bit(PG_dirty, &page->flags)) {
! 		struct address_space *mapping = page->mapping;
  
! 		if (mapping) {
! 			spin_lock(&pagecache_lock);
! 			list_del(&page->list);
! 			list_add(&page->list, &mapping->dirty_pages);
  			spin_unlock(&pagecache_lock);
  
! 			if (mapping->host)
! 				mark_inode_dirty_pages(mapping->host);
  		}
! 	}
! }
! struct page *
! find_or_add_page(struct page * new_page, struct address_space *mapping, unsigned long offset)
! {
! 	struct page *page = NULL;
! 	struct page **hash = page_hash(mapping, fragment->index);
! 
! 	/*
! 	 * We scan the hash list read-only. Addition to and removal from
! 	 * the hash-list needs a held write-lock.
! 	 */
! 	spin_lock(&pagecache_lock);
! 	page = __find_lock_page_helper(mapping, offset, *hash);
! 	if (page) {
  		spin_unlock(&pagecache_lock);
! 		return page;
  	}
! 	__add_to_page_cache(new_page, mapping, offset, hash);
! 	spin_unlock(&pagecache_lock);
! 	lru_cache_add(new_page);
! 	return NULL;
! }
  #endif
  
+ /*
+  * Same as the above, but lock the page too, verifying that
+  * it's still valid once we own it.
+  */
+ struct page * __find_lock_page (struct address_space *mapping,
+ 				unsigned long offset, struct page **hash)
+ {
+ 	struct page *page;
+ 
+ 	goto repeat;
+ 
+ repeat:
+ 	spin_lock(&pagecache_lock);
+ 	page = __find_lock_page_helper(mapping, offset, *hash);
+ 	spin_unlock(&pagecache_lock);
  	return page;
  }
***************
*** 2952,2956 ****
  
  	err = filler(data, page);
- 
  	if (err < 0) {
  		page_cache_release(page);
--- 2909,2912 ----
***************
*** 3022,3025 ****
--- 2978,2984 ----
  			goto repeat;
  		*cached_page = NULL;
+ #ifdef CONFIG_COMP_PAGE_CACHE
+ 		read_comp_cache(mapping, index, page);
+ #endif
  	}
  #ifdef CONFIG_COMP_PAGE_CACHE
Index: vmscan.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v
retrieving revision 1.42
retrieving revision 1.43
diff -C2 -r1.42 -r1.43
*** vmscan.c	28 Jul 2002 15:47:04 -0000	1.42
--- vmscan.c	31 Jul 2002 12:31:05 -0000	1.43
***************
*** 536,539 ****
--- 536,544 ----
  
  		spin_lock(&pagecache_lock);
+ 		if (!is_page_cache_freeable(page)) {
+ 			spin_unlock(&pagecache_lock);
+ 			UnlockPage(page);
+ 			continue;
+ 		}
  	}
  #endif