[lc-checkins] CVS: linux/mm filemap.c,1.30,1.31 memory.c,1.31,1.32 swap_state.c,1.31,1.32 vmscan.c,1.36,1.37
From: Rodrigo S. de C. <rc...@us...> - 2002-07-01 17:37:32
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv26297/mm

Modified Files:
	filemap.c memory.c swap_state.c vmscan.c
Log Message:

Features

o Some compressed cache functions (in particular the swap-out ones) now
  take a priority parameter, which is passed down from the main VM
  functions. The priority indicates how far we should go when scanning
  lists to free space in the compressed cache.
o Fragments are no longer decompressed when they are read by readahead
  (for both swap and page cache). In that case, we only check whether
  the fragment is in the compressed cache. This policy avoids changing
  the LRU order.
o Fragments are removed from the compressed cache if resizing on demand
  is enabled.
o Pages with buffers are supported only if resizing on demand is
  disabled.

Bug fixes

o Fixed a potential bug in __find_lock_page() where we would decompress
  a fragment and add a new page to the page cache even if the page had
  just been swapped in.

Cleanups

o Added #ifdefs in the LRU queue functions.
o Several small cleanups.

Other

o compress_page() returns the page in a locked state only if it has
  been compressed. Otherwise, the page is always returned unlocked.
o get_comp_cache_page() no longer tries so hard to get an entry: the
  maximum number of tries (which includes calls to
  writeout_fragments()) is now 3.
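The new cap in get_comp_cache_page() amounts to a bounded
allocate-then-writeout loop. The sketch below only illustrates that
policy and is not the committed code: the real function takes
arguments and locks that are omitted here, and try_alloc_entry() is a
made-up stand-in for the actual entry allocation path.

	struct comp_cache_page;
	extern struct comp_cache_page * try_alloc_entry(void);
	extern void writeout_fragments(void);

	#define COMP_CACHE_MAX_TRIES 3

	static struct comp_cache_page * get_entry_sketch(void)
	{
		int tries;

		for (tries = 0; tries < COMP_CACHE_MAX_TRIES; tries++) {
			struct comp_cache_page * entry = try_alloc_entry();

			if (entry)
				return entry;
			/* no entry available: write fragments out to
			 * their backing store to free space, then retry */
			writeout_fragments();
		}
		/* all 3 tries failed */
		return NULL;
	}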
Index: filemap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v
retrieving revision 1.30
retrieving revision 1.31
diff -C2 -r1.30 -r1.31
*** filemap.c	19 Jun 2002 12:18:43 -0000	1.30
--- filemap.c	1 Jul 2002 17:37:29 -0000	1.31
***************
*** 749,754 ****
   * and schedules an I/O to read in its contents from disk.
   */
! static int FASTCALL(page_cache_read(struct file * file, unsigned long offset, int access));
! static int page_cache_read(struct file * file, unsigned long offset, int access)
  {
  	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
--- 749,757 ----
   * and schedules an I/O to read in its contents from disk.
   */
! #define page_cache_read(file, offset) __page_cache_read(file, offset, 0)
! #define page_cache_readahead(file, offset) __page_cache_read(file, offset, 1)
! 
! static int FASTCALL(__page_cache_read(struct file * file, unsigned long offset, int readahead));
! static int __page_cache_read(struct file * file, unsigned long offset, int readahead)
  {
  	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
***************
*** 766,773 ****
  		return -ENOMEM;
  
  	if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
  		int error = 0;
  #ifdef CONFIG_COMP_PAGE_CACHE
! 		if (read_comp_cache(mapping, offset, page, access))
  #endif
  			error = mapping->a_ops->readpage(file, page);
--- 769,784 ----
  		return -ENOMEM;
  
+ 	if (readahead) {
+ 		struct page * tmp_page = __find_page_nolock(mapping, offset, *hash);
+ 		if (!tmp_page && in_comp_cache(mapping, offset)) {
+ 			page_cache_release(page);
+ 			return 0;
+ 		}
+ 	}
+ 
  	if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
  		int error = 0;
  #ifdef CONFIG_COMP_PAGE_CACHE
! 		if (readahead || read_comp_cache(mapping, offset, page))
  #endif
  			error = mapping->a_ops->readpage(file, page);
***************
*** 796,800 ****
  	offset = CLUSTER_OFFSET(offset);
  	while ((pages-- > 0) && (offset < filesize)) {
! 		int error = page_cache_read(file, offset, 0);
  		if (error < 0)
  			return error;
--- 807,811 ----
  	offset = CLUSTER_OFFSET(offset);
  	while ((pages-- > 0) && (offset < filesize)) {
! 		int error = page_cache_readahead(file, offset);
  		if (error < 0)
  			return error;
***************
*** 1029,1037 ****
  	page = __find_lock_page_helper(mapping, offset, *hash);
  #ifdef CONFIG_COMP_PAGE_CACHE
! 	if (!page) {
  		if (!cached_page) {
- 			struct comp_cache_fragment * fragment;
- 			if (find_comp_page(mapping, offset, &fragment))
- 				goto out;
  			spin_unlock(&pagecache_lock);
  			cached_page = page_cache_alloc(mapping);
--- 1040,1045 ----
  	page = __find_lock_page_helper(mapping, offset, *hash);
  #ifdef CONFIG_COMP_PAGE_CACHE
! 	if (!page && in_comp_cache(mapping, offset)) {
  		if (!cached_page) {
  			spin_unlock(&pagecache_lock);
  			cached_page = page_cache_alloc(mapping);
***************
*** 1039,1055 ****
  		}
  
! 		if (TryLockPage(cached_page))
! 			BUG();
! 
! 		if (read_comp_cache(mapping, offset, cached_page, 0)) {
  			UnlockPage(cached_page);
  			goto out;
  		}
  
  		page = cached_page;
  		cached_page = NULL;
- 
- 		add_to_page_cache(page, mapping, offset);
- 		SetPageUptodate(page);
  	}
  out:
--- 1047,1066 ----
  		}
  
! 		if (add_to_page_cache_unique(cached_page, mapping, offset, hash))
! 			goto out;
! 
! 		if (read_comp_cache(mapping, offset, cached_page)) {
! 			__lru_cache_del(cached_page);
! 			__remove_inode_page(cached_page);
  			UnlockPage(cached_page);
+ 			page_cache_release(cached_page);
  			goto out;
  		}
  
+ 		if (TryLockPage(cached_page))
+ 			BUG();
+ 
  		page = cached_page;
  		cached_page = NULL;
  	}
  out:
***************
*** 1087,1094 ****
  		lru_cache_add(page);
  #ifdef CONFIG_COMP_PAGE_CACHE
! 		if (!read_comp_cache(mapping, index, page, 0)) {
! 			if (TryLockPage(page))
  				BUG();
- 		}
  #endif
  	}
--- 1098,1103 ----
  		lru_cache_add(page);
  #ifdef CONFIG_COMP_PAGE_CACHE
! 		if (!read_comp_cache(mapping, index, page) && TryLockPage(page))
  			BUG();
  #endif
  	}
***************
*** 1365,1369 ****
  		if ((raend + ahead) >= end_index)
  			break;
! 		if (page_cache_read(filp, raend + ahead, 0) < 0)
  			break;
  	}
--- 1374,1378 ----
  		if ((raend + ahead) >= end_index)
  			break;
! 		if (page_cache_readahead(filp, raend + ahead) < 0)
  			break;
  	}
***************
*** 1629,1633 ****
  
  #ifdef CONFIG_COMP_PAGE_CACHE
! 		if (!read_comp_cache(mapping, index, page, 1))
  			goto page_ok;
  #endif
--- 1638,1642 ----
  
  #ifdef CONFIG_COMP_PAGE_CACHE
! 		if (!read_comp_cache(mapping, index, page))
  			goto page_ok;
  #endif
***************
*** 1936,1940 ****
  
  	while (nr) {
! 		page_cache_read(file, index, 0);
  		index++;
  		nr--;
--- 1945,1949 ----
  
  	while (nr) {
! 		page_cache_readahead(file, index);
  		index++;
  		nr--;
***************
*** 2026,2030 ****
  struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
  {
! 	int error, in_comp_cache;
  	struct file *file = area->vm_file;
  	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
--- 2035,2039 ----
  struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
  {
! 	int error;
  	struct file *file = area->vm_file;
  	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
***************
*** 2089,2103 ****
  	 * so we need to map a zero page.
  	 */
! 	in_comp_cache = 0;
! 	{
! 		struct comp_cache_fragment * fragment;
! 		if (!find_comp_page(mapping, pgoff, &fragment))
! 			in_comp_cache = 1;
! 	}
! 
! 	if ((pgoff < size) && !VM_RandomReadHint(area) && !in_comp_cache)
  		error = read_cluster_nonblocking(file, pgoff, size);
  	else
! 		error = page_cache_read(file, pgoff, 1);
  
  	/*
--- 2098,2105 ----
  	 * so we need to map a zero page.
  	 */
! 	if ((pgoff < size) && !VM_RandomReadHint(area) && !in_comp_cache(mapping, pgoff))
  		error = read_cluster_nonblocking(file, pgoff, size);
  	else
! 		error = page_cache_read(file, pgoff);
  
  	/*
***************
*** 2589,2593 ****
  	} else {
  		while ((start < end) && (start < size)) {
! 			error = page_cache_read(file, start, 0);
  			start++;
  			if (error < 0)
--- 2591,2595 ----
  	} else {
  		while ((start < end) && (start < size)) {
! 			error = page_cache_read(file, start);
  			start++;
  			if (error < 0)
***************
*** 2932,2936 ****
  
  #ifdef CONFIG_COMP_PAGE_CACHE
! 	if (read_comp_cache(mapping, index, page, 1))
  #endif
  		err = filler(data, page);
--- 2934,2938 ----
  
  #ifdef CONFIG_COMP_PAGE_CACHE
! 	if (read_comp_cache(mapping, index, page))
  #endif
  		err = filler(data, page);
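The readahead path above implements the "check, don't decompress"
policy from the log message. As a toy model (a userspace sketch, not
kernel code; the function and parameter names are illustrative only),
the decision reduces to:

	/*
	 * Returns 1 if readahead should issue disk I/O for this index,
	 * 0 if it should back off. A fragment already in the compressed
	 * cache is deliberately left alone: it is decompressed only on
	 * a real fault, so its LRU position is preserved.
	 */
	static int should_issue_readahead_io(int in_page_cache, int in_comp_cache)
	{
		if (in_page_cache)
			return 0;	/* already resident */
		if (in_comp_cache)
			return 0;	/* compressed copy exists: skip */
		return 1;		/* truly absent: read from disk */
	}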
Index: memory.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v
retrieving revision 1.31
retrieving revision 1.32
diff -C2 -r1.31 -r1.32
*** memory.c	19 Jun 2002 12:18:43 -0000	1.31
--- memory.c	1 Jul 2002 17:37:29 -0000	1.32
***************
*** 1109,1113 ****
  	for (i = 0; i < num; offset++, i++) {
  		/* Ok, do the async read-ahead now */
! 		new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset));
  		if (!new_page)
  			break;
--- 1109,1113 ----
  	for (i = 0; i < num; offset++, i++) {
  		/* Ok, do the async read-ahead now */
! 		new_page = read_swap_cache_async_ahead(SWP_ENTRY(SWP_TYPE(entry), offset));
  		if (!new_page)
  			break;
Index: swap_state.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v
retrieving revision 1.31
retrieving revision 1.32
diff -C2 -r1.31 -r1.32
*** swap_state.c	25 Jun 2002 14:34:07 -0000	1.31
--- swap_state.c	1 Jul 2002 17:37:29 -0000	1.32
***************
*** 192,196 ****
   * the swap entry is no longer in use.
   */
! struct page * read_swap_cache_async(swp_entry_t entry)
  {
  	struct page *found_page, *new_page = NULL;
--- 192,196 ----
   * the swap entry is no longer in use.
   */
! struct page * __read_swap_cache_async(swp_entry_t entry, int readahead)
  {
  	struct page *found_page, *new_page = NULL;
***************
*** 220,223 ****
--- 220,231 ----
  	}
  
+ 	if (readahead) {
+ 		struct page * tmp_page = find_page_nolock(&swapper_space, entry.val);
+ 		if (tmp_page)
+ 			break;
+ 		if (in_comp_cache(mapping, offset))
+ 			return new_page;
+ 	}
+ 
  	/*
  	 * Associate the page with swap entry in the swap cache.
***************
*** 231,239 ****
  		err = add_to_swap_cache(new_page, entry);
  		if (!err) {
! 			if (!read_comp_cache(&swapper_space, entry.val, new_page, 1))
! 				return new_page;
  			if (vswap_address(entry))
  				BUG();
- 			rw_swap_page(READ, new_page);
  			return new_page;
  		}
--- 239,246 ----
  		err = add_to_swap_cache(new_page, entry);
  		if (!err) {
! 			if (readahead || read_comp_cache(&swapper_space, entry.val, new_page))
! 				rw_swap_page(READ, new_page);
  			if (vswap_address(entry))
  				BUG();
  			return new_page;
  		}
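Note that memory.c now calls read_swap_cache_async_ahead() while
swap_state.c renames the worker to __read_swap_cache_async(); the two
presumably meet in a pair of wrappers mirroring the
page_cache_read()/page_cache_readahead() macros in filemap.c. Those
wrappers are not part of this diff, so the definitions below are an
assumption about the accompanying header change:

	#define read_swap_cache_async(entry) \
		__read_swap_cache_async(entry, 0)
	#define read_swap_cache_async_ahead(entry) \
		__read_swap_cache_async(entry, 1)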
Index: vmscan.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v
retrieving revision 1.36
retrieving revision 1.37
diff -C2 -r1.36 -r1.37
*** vmscan.c	25 Jun 2002 14:34:07 -0000	1.36
--- vmscan.c	1 Jul 2002 17:37:29 -0000	1.37
***************
*** 421,425 ****
  		spin_unlock(&pagemap_lru_lock);
  
! 		compress_dirty_page(page, writepage, gfp_mask);
  
  		page_cache_release(page);
--- 421,425 ----
  		spin_unlock(&pagemap_lru_lock);
  
! 		compress_dirty_page(page, writepage, gfp_mask, priority);
  
  		page_cache_release(page);
***************
*** 427,436 ****
  		spin_lock(&pagemap_lru_lock);
  
- 		/***
- 		 * if we could compress, it means that
- 		 * the page has neither been mapped
- 		 * back to any process nor freed, so
- 		 * we can go on freeing it here
- 		 */
  		if (!PageCompCache(page))
  			continue;
--- 427,430 ----
  		spin_lock(&pagemap_lru_lock);
  
  		if (!PageCompCache(page))
  			continue;
***************
*** 449,453 ****
  		page_cache_get(page);
  
! 		if (comp_cache_try_to_release_page(&page, gfp_mask)) {
  			if (!page->mapping) {
  				/*
--- 443,447 ----
  		page_cache_get(page);
  
! 		if (comp_cache_try_to_release_page(&page, gfp_mask, priority)) {
  			if (!page->mapping) {
  				/*
***************
*** 524,539 ****
  		 */
  		if (!PageCompCache(page)) {
  			page_cache_get(page);
  			spin_unlock(&pagemap_lru_lock);
  
! 			if (!compress_clean_page(page, gfp_mask)) {
! 				UnlockPage(page);
! 				page_cache_release(page);
! 				spin_lock(&pagemap_lru_lock);
  				continue;
- 			}
- 
- 			page_cache_release(page);
- 			spin_lock(&pagemap_lru_lock);
  		}
  #endif
--- 518,533 ----
  		 */
  		if (!PageCompCache(page)) {
+ 			int compressed;
+ 
  			page_cache_get(page);
  			spin_unlock(&pagemap_lru_lock);
+ 
+ 			compressed = compress_clean_page(page, gfp_mask, priority);
+ 
+ 			page_cache_release(page);
+ 			spin_lock(&pagemap_lru_lock);
  
! 			if (!compressed)
  				continue;
  		}
  #endif
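About the priority argument now threaded through compress_dirty_page(),
comp_cache_try_to_release_page() and compress_clean_page(): in the 2.4
VM, priority starts at DEF_PRIORITY (6) and falls toward 1 as memory
pressure grows, and scan depth is conventionally derived by dividing a
list length by it. A hypothetical helper in that spirit (again, not the
committed code):

	/* how many compressed-cache entries to scan at this priority;
	 * lower priority values mean deeper, more aggressive scans */
	static int comp_cache_scan_count(int nr_entries, int priority)
	{
		if (priority < 1)
			priority = 1;	/* guard against division by zero */
		return nr_entries / priority;
	}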