[lc-checkins] CVS: linux26/mm filemap.c,1.5,1.6 vmscan.c,1.5,1.6
From: Nitin G. <nit...@us...> - 2006-01-23 20:51:00
Update of /cvsroot/linuxcompressed/linux26/mm
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv19529/mm

Modified Files:
	filemap.c vmscan.c
Log Message:
Initial (incomplete) implementation (only page cache pages).
No compress/decompress - just copy. Compiles cleanly - don't run.

Index: filemap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux26/mm/filemap.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -C2 -r1.5 -r1.6
*** filemap.c	23 Jan 2006 20:45:09 -0000	1.5
--- filemap.c	23 Jan 2006 20:50:50 -0000	1.6
***************
*** 38,41 ****
--- 38,43 ----
  #include <asm/mman.h>
  
+ #include <linux/ccache.h>	/* for struct chunk_head */
+ 
  static ssize_t
  generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
***************
*** 547,551 ****
  		unsigned long offset)
  {
! 	struct page *page;
  
  	read_lock_irq(&mapping->tree_lock);
--- 549,554 ----
  		unsigned long offset)
  {
! 	struct page *page, *newpage;
! 	struct chunk_head *ch;
  
  	read_lock_irq(&mapping->tree_lock);
***************
*** 557,560 ****
--- 560,622 ----
  	read_unlock_irq(&mapping->tree_lock);
  	lock_page(page);
+ 
+ 	/*
+ 	 * If PageWillCompress is set after the radix tree lookup
+ 	 * and after acquiring the lock on the page, then the page
+ 	 * must be in ccache now, and thus 'page' points to the
+ 	 * original uncompressed page instead of a chunk_head.
+ 	 * So invalidate (free) the page's entry in ccache and make
+ 	 * the radix node point back to this page.
+ 	 */
+ 	if (PageWillCompress(page)) {
+ 		/*
+ 		 * This happens if control reaches here during page
+ 		 * compression, before the page could be replaced in
+ 		 * the page cache in place of the original
+ 		 * uncompressed page.
+ 		 */
+ 		write_lock_irq(&mapping->tree_lock);
+ 		newpage = radix_tree_lookup(&mapping->page_tree, page->index);
+ 		radix_tree_delete(&mapping->page_tree, page->index);
+ 		radix_tree_insert(&mapping->page_tree, page->index, page);
+ 		write_unlock_irq(&mapping->tree_lock);
+ 
+ 		ClearPageWillCompress(page);
+ 
+ 		ch = (struct chunk_head *)(page_private(newpage));
+ 		__free_page(ch->chunk);
+ 		kfree((struct chunk_head *)(page_private(newpage)));
+ 		kfree(newpage);
+ 	}
+ 
+ 	/*
+ 	 * In this case, 'page' points to a 'chunk_head' instead of
+ 	 * the original uncompressed page.
+ 	 */
+ 	if (PageCompressed(page)) {
+ 		/* get_ccache_page(page); */
+ 		ch = (struct chunk_head *)(page_private(page));
+ 		newpage = ch->chunk;
+ 
+ 		/* Restore all fields we backed up in add_to_ccache() */
+ 		*newpage = *page;
+ 		set_page_private(newpage, ch->orig_private);
+ 
+ 		/*
+ 		 * Replace this 'chunk_head' in the page cache with the
+ 		 * original uncompressed page (stored in 'chunk').
+ 		 */
+ 		write_lock_irq(&mapping->tree_lock);
+ 		radix_tree_delete(&mapping->page_tree, page->index);
+ 		radix_tree_insert(&mapping->page_tree, page->index, newpage);
+ 		write_unlock_irq(&mapping->tree_lock);
+ 
+ 		/* Free metadata info for this page from ccache */
+ 		kfree((struct chunk_head *)(page_private(page)));
+ 		kfree(page);	/* this 'page' points to a chunk_head */
+ 
+ 		/* Now 'page' points to the just-uncompressed page */
+ 		page = newpage;
+ 	}
+ 
  	read_lock_irq(&mapping->tree_lock);
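
Note: <linux/ccache.h> itself is not part of this checkin. Inferred from
the usage above and in vmscan.c below, a minimal sketch of what that
header would need to provide - the names are modeled on the standard
PageFoo() helpers, and the flag bit values are placeholders only:

/*
 * linux/ccache.h - sketch only, inferred from this checkin; not the
 * committed header. PG_willcompress/PG_compressed must be bits that
 * are unused in page->flags on the target kernel; 20/21 are
 * placeholders.
 */
#ifndef _LINUX_CCACHE_H
#define _LINUX_CCACHE_H

#include <linux/mm.h>

#define PG_willcompress	20	/* compression decided, not yet done */
#define PG_compressed	21	/* radix slot holds a chunk_head */

#define PageWillCompress(page)		test_bit(PG_willcompress, &(page)->flags)
#define SetPageWillCompress(page)	set_bit(PG_willcompress, &(page)->flags)
#define ClearPageWillCompress(page)	clear_bit(PG_willcompress, &(page)->flags)

#define PageCompressed(page)		test_bit(PG_compressed, &(page)->flags)
#define SetPageCompressed(page)		set_bit(PG_compressed, &(page)->flags)
#define ClearPageCompressed(page)	clear_bit(PG_compressed, &(page)->flags)

/*
 * Stands in for the original page in mapping->page_tree; the intended
 * layout is the commented-out struct in vmscan.c below.
 */
struct chunk_head {
	struct page *orig_page, *chunk;
	unsigned long orig_private;	/* backup of page_private() */
};

#endif /* _LINUX_CCACHE_H */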
Index: vmscan.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux26/mm/vmscan.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -C2 -r1.5 -r1.6
*** vmscan.c	23 Jan 2006 20:45:09 -0000	1.5
--- vmscan.c	23 Jan 2006 20:50:50 -0000	1.6
***************
*** 40,43 ****
--- 40,45 ----
  #include <linux/swapops.h>
  
+ #include <linux/ccache.h>	/* for struct chunk_head */
+ 
  /* possible outcome of pageout() */
  typedef enum {
***************
*** 52,55 ****
--- 54,67 ----
  } pageout_t;
  
+ /*
+ struct chunk_head {
+ 	// actually there will be no single chunk;
+ 	// instead it will have a chunk list
+ 	struct page *orig_page, *chunk;
+ 	// chunk_head is stored in the private field, so back it up here
+ 	unsigned long orig_private;
+ };
+ */
+ 
  struct scan_control {
  	/* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
***************
*** 127,130 ****
--- 139,210 ----
  
  /*
+  * Heuristic to decide whether a page should go to ccache.
+  * Assumes the page is locked.
+  */
+ static int should_add_to_ccache(struct page *page)
+ {
+ 	if (PagePrivate(page) || PageSwapCache(page))
+ 		return 0;
+ 	SetPageWillCompress(page);
+ 	return 1;
+ }
+ 
+ /*
+  * Compress the page and add it to ccache.
+  * newpage is a container for the info needed to locate the page
+  * in ccache.
+  */
+ static int add_to_ccache(struct page *page)
+ {
+ 	struct address_space *mapping;
+ 	struct chunk_head *ch;
+ 	struct page *newpage = NULL, *chunk = NULL;
+ 
+ 	ch = kmalloc(sizeof(struct chunk_head), GFP_KERNEL);
+ 	if (!ch)
+ 		goto out;
+ 
+ 	newpage = kmalloc(sizeof(struct page), GFP_KERNEL);
+ 	if (!newpage)
+ 		goto out;
+ 
+ 	chunk = alloc_page(GFP_KERNEL);
+ 	if (!chunk)
+ 		goto out;
+ 
+ 	ch->orig_page = page;
+ 	ch->orig_private = page_private(page);
+ 	ch->chunk = chunk;
+ 
+ 	*newpage = *page;	/* back up all fields of the original struct page */
+ 	set_page_private(newpage, (unsigned long)ch);
+ 	ClearPageWillCompress(newpage);
+ 
+ 	/* compress(page, dest); */
+ 	memcpy(page_address(chunk), page_address(page), PAGE_SIZE);
+ 
+ 	SetPageCompressed(newpage);
+ 
+ 	/*
+ 	 * Add newpage to ccache: replace the entry corresponding to
+ 	 * 'page' in the radix tree with 'newpage'.
+ 	 * TODO: implement a real replace - not remove, then add.
+ 	 */
+ 	mapping = page->mapping;
+ 	write_lock_irq(&mapping->tree_lock);
+ 	radix_tree_delete(&mapping->page_tree, page->index);
+ 	radix_tree_insert(&mapping->page_tree, page->index, newpage);
+ 	write_unlock_irq(&mapping->tree_lock);
+ 
+ 	unlock_page(page);
+ 	ClearPageWriteback(page);
+ 	ClearPageReclaim(page);
+ 
+ 	return 0;	/* success */
+ out:
+ 	kfree(ch);	/* kfree(NULL) is a no-op */
+ 	kfree(newpage);
+ 	ClearPageWillCompress(page);
+ 	return 1;
+ }
+ 
+ 
+ /*
   * From 0 .. 100.  Higher means more swappy.
   */
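
The TODO above about a real replace could drop the delete/insert pair
entirely. One possible shape, assuming a radix_tree_lookup_slot()-style
helper (which is not in every 2.6 tree of this era; if it is absent,
the delete+insert pair stands) and that storing into the slot directly
is safe while tree_lock is held for writing:

/*
 * Sketch of a one-step replace for the page-cache radix tree.
 * ccache_replace_page() is not part of this checkin.
 */
static void ccache_replace_page(struct address_space *mapping,
				struct page *page, struct page *newpage)
{
	void **slot;

	write_lock_irq(&mapping->tree_lock);
	slot = radix_tree_lookup_slot(&mapping->page_tree, page->index);
	if (slot)
		*slot = newpage;	/* keeps node structure and tags intact */
	write_unlock_irq(&mapping->tree_lock);
}

Unlike delete+insert, this cannot transiently leave the index empty and
does not discard the dirty/writeback tags stored in the tree nodes.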
***************
*** 317,320 ****
--- 397,401 ----
  static pageout_t pageout(struct page *page, struct address_space *mapping)
  {
+ 	int error = 0;
  	/*
  	 * If the page is dirty, only perform writeback if that write
***************
*** 350,353 ****
--- 431,441 ----
  		return PAGE_KEEP;
  	}
+ 
+ 	if (PageWillCompress(page)) {
+ 		SetPageReclaim(page);
+ 		error = add_to_ccache(page);
+ 	}
+ 	if (!error)
+ 		return PAGE_SUCCESS;
+ 
  	if (mapping->a_ops->writepage == NULL)
  		return PAGE_ACTIVATE;
***************
*** 392,396 ****
  	int pgactivate = 0;
  	int reclaimed = 0;
! 
  	cond_resched();
--- 480,484 ----
  	int pgactivate = 0;
  	int reclaimed = 0;
! 	int ret = 0;
  	cond_resched();
***************
*** 420,423 ****
--- 508,518 ----
  			goto keep_locked;
  
+ 		if (PageWillCompress(page)) {
+ 			ClearPageWillCompress(page);
+ 			__put_page(page);
+ 			if (page_count(page) == 1)
+ 				goto free_it;
+ 		}
+ 
  		referenced = page_referenced(page, 1);
  		/* In active use or really unfreeable?  Activate it. */
***************
*** 457,461 ****
  		}
  
! 		if (PageDirty(page)) {
  			if (referenced)
  				goto keep_locked;
--- 552,558 ----
  		}
  
! 		ret = should_add_to_ccache(page);
! 
! 		if (PageDirty(page) || PageWillCompress(page)) {
  			if (referenced)
  				goto keep_locked;
***************
*** 472,476 ****
  				goto activate_locked;
  			case PAGE_SUCCESS:
! 				if (PageWriteback(page) || PageDirty(page))
  					goto keep;
  				/*
--- 569,588 ----
  				goto activate_locked;
  			case PAGE_SUCCESS:
! 				/* if (PageWriteback(page) || PageDirty(page)) */
! 				/* This can also occur for an async add_to_ccache() */
! 				if (PageWriteback(page))
! 					goto keep;
! 				/*
! 				 * Writeback is complete, so free it now.
! 				 * The page was unlocked in add_to_ccache().
! 				 */
! 				if (PageWillCompress(page)) {
! 					ClearPageWillCompress(page);
! 					__put_page(page);
! 					if (page_count(page) == 1)
! 						goto free_it_unlocked;	/* it's already unlocked */
! 				}
! 				if (PageDirty(page))
! 					goto keep;
  				/*
***************
*** 549,552 ****
--- 661,665 ----
  free_it:
  		unlock_page(page);
+ free_it_unlocked:
  		reclaimed++;
  		if (!pagevec_add(&freed_pvec, page))
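
As the log message says, this checkin only copies the page: the
memcpy() in add_to_ccache() stands in for the real compressor (note
the commented-out compress(page, dest) call). A hypothetical shape for
that hook - compress_page() is invented for this sketch, and storing
the variable-size output compactly (the chunk list mentioned in the
commented-out struct) is still the open problem:

/*
 * Hypothetical compression step to replace the memcpy() in
 * add_to_ccache(). compress_page() does not exist in this checkin;
 * its return value is the compressed size in bytes.
 */
static int ccache_compress(struct page *page, struct chunk_head *ch)
{
	void *src = page_address(page);
	void *dst = page_address(ch->chunk);
	size_t out_len;

	out_len = compress_page(src, dst, PAGE_SIZE);	/* hypothetical */
	if (out_len >= PAGE_SIZE) {
		/*
		 * Incompressible: keep a plain copy, which is exactly
		 * what the current memcpy() path does for every page.
		 */
		memcpy(dst, src, PAGE_SIZE);
		out_len = PAGE_SIZE;
	}
	return out_len;
}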