[lc-checkins] CVS: linux/mm/comp_cache adaptivity.c,1.3,1.4 aux.c,1.11,1.12 free.c,1.15,1.16 main.c,
From: Rodrigo S. de C. <rc...@us...> - 2002-01-14 12:05:11
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv13325/mm/comp_cache

Modified Files:
	adaptivity.c aux.c free.c main.c swapin.c swapout.c vswap.c

Log Message:

This batch of changes still includes lots of cleanups and code rewriting to make the code simpler. A performance increase has been noticed too.

- number_of_pages in comp_cache_t removed. We can check whether there are no fragments via the fragments list (i.e. list_empty()).

- vswap: no semaphore is needed. I have no idea why the functions {lock,unlock}_vswap were once added; I can't see why they are needed, so they were removed. The same goes for the real_entry field in struct vswap_address.

- vswap: a new function has been added, namely add_fragment_vswap(), analogous to remove_fragment_vswap(). It's called from get_comp_cache_page() and helps a great deal in keeping things modular.

- vm_enough_memory(): now we take compressed cache space into account when allowing an application to allocate memory. That is done by calling a function named comp_cache_free_space(), which returns, based upon estimated_free_space, the number of pages that can still be compressed (a sketch of this arithmetic follows the diffs below).

- move_and_fix_fragments() deleted. comp_cache_free() has a new policy of not moving data to and fro all the time like before: we free the fragment but leave it in place, waiting to be merged with the free space. It's pretty simple, check the code. The new code has two new functions: merge_right_neighbour() and merge_left_neighbour() (a simplified model of this merge policy follows the diffs below).

- the fragments list is kept sorted by the offset field, so when freeing we don't have to search for the next and previous fragments every time. Since most of the time the insertion is just a plain list_add_tail() in get_comp_cache_page(), that makes the code simpler and nicer (see the sorted-insert sketch after the diffs below).

- lookup_comp_cache() was partially rewritten, mainly because we no longer sleep to get a lock on the comp_page.

- find_and_lock_comp_page() function removed and find_nolock_comp_page() renamed to find_comp_page(). All functions that previously called find_and_lock... now call find_comp_page() and lock the comp_page at once with TryLockPage() (a locking sketch follows the diffs below).

- oom_kill() was fixed and now takes into account the free space in the compressed cache by calling comp_cache_available_space(). That avoids killing an application while we still have space left in the compressed cache.

Index: adaptivity.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -r1.3 -r1.4 *** adaptivity.c 2002/01/07 17:48:29 1.3 --- adaptivity.c 2002/01/14 12:05:08 1.4 *************** *** 2,6 **** * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-01-07 11:18:58 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-01-12 14:21:34 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 25,29 **** /* if the comp_page is not empty, can't free it */ ! if (comp_page->number_of_pages) goto out; --- 25,29 ---- /* if the comp_page is not empty, can't free it */ ! if (!list_empty(&(comp_page->fragments))) goto out; *************** *** 61,65 **** /* we raced */ ! if (comp_page->number_of_pages) { UnlockPage(empty_comp_page->page); return retval; --- 61,65 ---- /* we raced */ !
if (!list_empty(&(comp_page->fragments))) { UnlockPage(empty_comp_page->page); return retval; Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.11 retrieving revision 1.12 diff -C2 -r1.11 -r1.12 *** aux.c 2002/01/10 12:39:31 1.11 --- aux.c 2002/01/14 12:05:08 1.12 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-01-08 16:33:30 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-01-13 16:16:07 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 176,180 **** inline comp_cache_t * ! find_nolock_comp_page(swp_entry_t entry, comp_cache_fragment_t ** fragment_out) { comp_cache_fragment_t * fragment; --- 176,180 ---- inline comp_cache_t * ! find_comp_page(swp_entry_t entry, comp_cache_fragment_t ** fragment_out) { comp_cache_fragment_t * fragment; *************** *** 192,272 **** } ! /* ! * Search for entry in comp cache array. ! */ ! comp_cache_t * ! find_and_lock_comp_page(swp_entry_t entry, comp_cache_fragment_t ** fragment_out) { ! comp_cache_t * comp_page; ! comp_cache_fragment_t * fragment = NULL, * aux_fragment = NULL; ! struct page * page; ! struct list_head * fragment_lh; ! ! repeat: ! comp_page = find_nolock_comp_page(entry, fragment_out); ! if (comp_page) { ! page = comp_page->page; ! ! page_cache_get(page); ! ! lock_page(page); ! ! if (!comp_page->page) ! goto again; ! ! for_each_fragment(fragment_lh, comp_page) { ! fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); ! ! if (fragment->index == entry.val) { ! aux_fragment = fragment; ! break; ! } ! } ! ! if (aux_fragment) { ! if (aux_fragment != *fragment_out) ! *fragment_out = fragment; ! ! page_cache_release(page); ! return comp_page; ! } ! ! again: ! UnlockPage(page); ! page_cache_release(page); ! goto repeat; ! } ! return NULL; ! } ! ! inline void ! move_and_fix_fragments(comp_cache_t * comp_page, ! unsigned short offset_from, ! unsigned short offset_to, ! unsigned short size_to_move) { ! comp_cache_fragment_t * fragment; ! struct list_head * fragment_lh; ! ! /* to make sure we are not beyond our boundaries */ ! if (offset_from + size_to_move > PAGE_SIZE) ! BUG(); ! ! memmove(page_address(comp_page->page) + offset_to, page_address(comp_page->page) + offset_from, size_to_move); ! ! /* update all used fragments offsets */ ! for_each_fragment(fragment_lh, comp_page) { ! fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); ! ! if (!fragment->index) ! BUG(); ! ! if (fragment->offset < offset_from) ! continue; ! ! if (fragment->offset >= (offset_from + size_to_move)) ! continue; ! ! fragment->offset -= (offset_from - offset_to); ! } } --- 192,199 ---- } ! inline int ! comp_cache_free_space(void) { ! return 2 * (estimated_free_space >> PAGE_SHIFT); } *************** *** 290,294 **** comp_cache_fragment_t * fragment, * aux_fragment; struct list_head * fragment_lh, * aux_fragment_lh; ! int counter = 0, used_space = 0; swp_entry_t entry; --- 217,221 ---- comp_cache_fragment_t * fragment, * aux_fragment; struct list_head * fragment_lh, * aux_fragment_lh; ! 
int used_space = 0; swp_entry_t entry; *************** *** 312,320 **** used_space += fragment->compressed_size; - counter++; } - - if (counter != comp_page->number_of_pages) - BUG(); if (comp_page->free_space != PAGE_SIZE - used_space) --- 239,243 ---- Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.15 retrieving revision 1.16 diff -C2 -r1.15 -r1.16 *** free.c 2002/01/10 12:39:31 1.15 --- free.c 2002/01/14 12:05:08 1.16 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-01-09 18:08:49 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-01-13 19:00:05 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 19,25 **** extern kmem_cache_t * fragment_cachep; ! extern inline void move_and_fix_fragments(comp_cache_t *, unsigned short, unsigned short, unsigned short); ! extern comp_cache_t * find_nolock_comp_page(swp_entry_t, comp_cache_fragment_t **); ! extern void remove_vswap(comp_cache_t *, comp_cache_fragment_t *); /* is fragment1 the left neighbour of fragment2? */ --- 19,23 ---- extern kmem_cache_t * fragment_cachep; ! extern void remove_fragment_vswap(comp_cache_fragment_t *); /* is fragment1 the left neighbour of fragment2? */ *************** *** 30,196 **** #define right_fragment(fragment1, fragment2) left_fragment(fragment2, fragment1) void ! comp_cache_free_nohash(comp_cache_fragment_t * fragment_to_free) { ! comp_cache_t * comp_page = fragment_to_free->comp_page; ! comp_cache_fragment_t * next_fragment, * previous_fragment, * fragment; ! struct list_head * fragment_lh, * fragment_to_free_lh; ! unsigned short offset_from, offset_to, size_to_move; if (!comp_page) BUG(); - if (!comp_page->page) BUG(); - - if (!PageLocked(comp_page->page)) - BUG(); - if (!PageCompCache(comp_page->page)) BUG(); ! ! if (!fragment_to_free) BUG(); ! if (not_compressed(fragment_to_free) && comp_page->free_space) BUG(); - - //check_all_fragments(comp_page); - - next_fragment = NULL; - previous_fragment = NULL; - fragment_to_free_lh = NULL; - - /* search the previous and the next fragment */ - for_each_fragment(fragment_lh, comp_page) { - fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); - - if (left_fragment(fragment_to_free, fragment)) { - /* corruption */ - if (next_fragment) - BUG(); - next_fragment = fragment; - } - - if (right_fragment(fragment_to_free, fragment)) { - /* corruption */ - if (previous_fragment) - BUG(); - previous_fragment = fragment; - } - - if (fragment == fragment_to_free) - fragment_to_free_lh = fragment_lh; ! /* corruption */ ! if (fragment->offset < fragment_to_free->offset && fragment->offset + fragment->compressed_size > fragment_to_free->offset) ! BUG(); ! } ! if (!fragment_to_free_lh) ! BUG(); ! //check_all_fragments(comp_page); ! /* simple case - no free space * 1 - one not compressed page * 2 - sum of all fragments = PAGE_SIZE */ if (!comp_page->free_space) { ! comp_page->free_offset = fragment_to_free->offset; ! goto out; } /* this fragment has the free space as its left neighbour */ ! if (comp_page->free_offset + comp_page->free_space == fragment_to_free->offset) { ! if (previous_fragment) ! BUG(); ! goto out; } /* this fragment has the free space as its right neighbour */ ! if (fragment_to_free->offset + fragment_to_free->compressed_size == comp_page->free_offset) { ! if (next_fragment) ! BUG(); ! 
comp_page->free_offset = fragment_to_free->offset; ! goto out; } /* we have used fragment(s) between the free space and the one we want to free */ ! if (comp_page->free_offset < fragment_to_free->offset) { ! if (comp_page->free_offset + comp_page->free_space >= fragment_to_free->offset) ! BUG(); ! offset_to = comp_page->free_offset; ! offset_from = comp_page->free_offset + comp_page->free_space; ! size_to_move = fragment_to_free->offset - offset_from; ! } ! else { ! if (fragment_to_free->offset + fragment_to_free->compressed_size >= comp_page->free_offset) ! BUG(); ! offset_to = fragment_to_free->offset; ! offset_from = fragment_to_free->offset + fragment_to_free->compressed_size; ! size_to_move = comp_page->free_offset - offset_from; ! } ! move_and_fix_fragments(comp_page, offset_from, offset_to, size_to_move); ! /* let's merge the fragments */ ! comp_page->free_offset = offset_to + size_to_move; ! out: ! remove_vswap(comp_page, fragment_to_free); ! ! if (comp_page->free_space > PAGE_SIZE) ! BUG(); ! ! /* free this fragments */ ! list_del(fragment_to_free_lh); ! remove_fragment_from_hash_table(fragment_to_free); ! remove_fragment_from_lru_queue(fragment_to_free); ! ! if (!comp_page->number_of_pages) ! BUG(); ! comp_page->free_space += fragment_to_free->compressed_size; ! comp_page->number_of_pages--; /* is this fragment waiting for swap out? let's not free it * now, but let's tell swap out path that it does not need IO * anymore because it has been freed (maybe due to swapin) */ ! if (CompFragmentIO(fragment_to_free)) { ! CompFragmentClearIO(fragment_to_free); ! return; ! } ! ! kmem_cache_free(fragment_cachep, (fragment_to_free)); ! } ! /** ! * - comp_cache_free: removes the fragment->comp_page from the avl ! * tree, frees the fragment and inserts it back into the avl tree if ! * we didn't shrink the cache ! * ! * - return value: nothing, but the fragment->comp_page->page will be ! * returned _unlocked_ ! * ! * @fragment: fragment which we are freeing */ ! inline void ! comp_cache_free(comp_cache_fragment_t * fragment) { ! comp_cache_t * comp_page = fragment->comp_page; - if (!PageLocked(comp_page->page)) - BUG(); ! /* remove from the free space hash table to update it */ ! remove_comp_page_from_hash_table(comp_page); ! ! /* effectively free it */ ! comp_cache_free_nohash(fragment); ! ! /* steal the page if we need to shrink the comp cache */ ! if (!shrink_comp_cache(comp_page)) { ! add_comp_page_to_hash_table(comp_page); ! UnlockPage(comp_page->page); ! 
} } --- 28,166 ---- #define right_fragment(fragment1, fragment2) left_fragment(fragment2, fragment1) + static inline void + merge_right_neighbour(comp_cache_fragment_t * fragment_to_free, comp_cache_fragment_t * right_fragment) + { + if (!right_fragment) + return; + + if (!right_fragment->index) { + fragment_to_free->compressed_size += right_fragment->compressed_size; + list_del(&(right_fragment->list)); + if (!CompFragmentTestandClearIO(right_fragment)) + kmem_cache_free(fragment_cachep, (right_fragment)); + + + } + } + + static inline void + merge_left_neighbour(comp_cache_fragment_t * fragment_to_free, comp_cache_fragment_t * left_fragment) + { + if (!left_fragment) + return; + + if (!left_fragment->index) { + fragment_to_free->offset = left_fragment->offset; + fragment_to_free->compressed_size += left_fragment->compressed_size; + + list_del(&(left_fragment->list)); + + if (!CompFragmentTestandClearIO(left_fragment)) + kmem_cache_free(fragment_cachep, (left_fragment)); + } + } + + static inline void + remove_fragment_from_comp_cache(comp_cache_fragment_t * fragment) + { + remove_fragment_vswap(fragment); + remove_fragment_from_hash_table(fragment); + remove_fragment_from_lru_queue(fragment); + } + void ! comp_cache_free(comp_cache_fragment_t * fragment) { ! comp_cache_t * comp_page; ! comp_cache_fragment_t * next_fragment, * previous_fragment; + if (!fragment) + BUG(); + comp_page = fragment->comp_page; if (!comp_page) BUG(); if (!comp_page->page) BUG(); if (!PageCompCache(comp_page->page)) BUG(); ! if (not_compressed(fragment) && comp_page->free_space) BUG(); ! if (TryLockPage(comp_page->page)) BUG(); ! /* remove from the free space hash table to update it */ ! remove_comp_page_from_hash_table(comp_page); ! /* fragment is added in the correct location to the comp_page ! * list (see get_comp_cache_page():swapout.c) */ ! next_fragment = NULL; ! if (fragment->list.next != &(comp_page->fragments)) ! next_fragment = list_entry(fragment->list.next, comp_cache_fragment_t, list); ! previous_fragment = NULL; ! if (fragment->list.prev != &(comp_page->fragments)) ! previous_fragment = list_entry(fragment->list.prev, comp_cache_fragment_t, list); ! /* simple case - no free space * 1 - one not compressed page * 2 - sum of all fragments = PAGE_SIZE */ if (!comp_page->free_space) { ! remove_fragment_from_comp_cache(fragment); ! comp_page->free_offset = fragment->offset; ! goto remove; } /* this fragment has the free space as its left neighbour */ ! if (comp_page->free_offset + comp_page->free_space == fragment->offset) { ! remove_fragment_from_comp_cache(fragment); ! ! merge_right_neighbour(fragment, next_fragment); ! goto remove; } /* this fragment has the free space as its right neighbour */ ! if (fragment->offset + fragment->compressed_size == comp_page->free_offset) { ! remove_fragment_from_comp_cache(fragment); ! ! merge_left_neighbour(fragment, previous_fragment); ! comp_page->free_offset = fragment->offset; ! goto remove; } /* we have used fragment(s) between the free space and the one we want to free */ ! remove_fragment_from_comp_cache(fragment); ! fragment->index = 0; ! merge_right_neighbour(fragment, next_fragment); ! merge_left_neighbour(fragment, previous_fragment); ! goto out; ! remove: ! /* remove the fragment from comp page */ ! list_del(&(fragment->list)); ! ! /* careful: that's not only the compressed size from this ! * fragment, but also the fragments that might have been ! * merged in merge_*_neighbour() functions above */ ! 
comp_page->free_space += fragment->compressed_size; /* is this fragment waiting for swap out? let's not free it * now, but let's tell swap out path that it does not need IO * anymore because it has been freed (maybe due to swapin) */ ! if (!CompFragmentTestandClearIO(fragment)) ! kmem_cache_free(fragment_cachep, (fragment)); ! out: ! /* steal the page if we need to shrink the comp cache */ ! if (shrink_comp_cache(comp_page)) ! return; ! add_comp_page_to_hash_table(comp_page); ! UnlockPage(comp_page->page); } *************** *** 209,220 **** * set the Freed bit, which will make it possible to be freed * later */ ! comp_page = find_nolock_comp_page(entry, &fragment); ! if (comp_page) { ! if (TryLockPage(comp_page->page)) ! BUG(); ! comp_cache_free(fragment); - } /* no virtual swap entry with a compressed page */ --- 179,186 ---- * set the Freed bit, which will make it possible to be freed * later */ ! comp_page = find_comp_page(entry, &fragment); ! if (comp_page) comp_cache_free(fragment); /* no virtual swap entry with a compressed page */ *************** *** 234,246 **** BUG(); - if (vswap_locked(vswap->offset)) - continue; - old_entry = SWP_ENTRY(COMP_CACHE_SWP_TYPE, vswap->offset); ! if (TryLockPage(vswap->fragment->comp_page->page)) { ! unlock_vswap(old_entry); continue; - } /* no swap cache page? ok, let's assign the real entry */ --- 200,207 ---- BUG(); old_entry = SWP_ENTRY(COMP_CACHE_SWP_TYPE, vswap->offset); ! if (TryLockPage(vswap->fragment->comp_page->page)) continue; /* no swap cache page? ok, let's assign the real entry */ *************** *** 253,257 **** /* couldn't lock the swap cache page, let's try * another page */ - unlock_vswap(old_entry); UnlockPage(vswap->fragment->comp_page->page); } --- 214,217 ---- *************** *** 279,287 **** vswap->fragment = VSWAP_RESERVED; - /* this info is just in the case some code path (eg swapin) - * need to know if a virtual swap entry has been set to a real - * swap entry. It will be cleared in comp_cache_swp_free() */ - vswap->real_entry.val = entry.val; - /* old_virtual_addressed_pte <- new real swap entry */ pte_list = vswap->pte_list; --- 239,242 ---- *************** *** 316,320 **** ptep_get_and_clear(pte_list->ptep); ! set_pte(pte_list->ptep, swp_entry_to_pte(entry)); swap_duplicate(entry); --- 271,275 ---- ptep_get_and_clear(pte_list->ptep); ! set_pte(pte_list->ptep, swp_entry_to_pte(entry)); swap_duplicate(entry); *************** *** 340,344 **** } - unlock_vswap(old_entry); UnlockPage(comp_page->page); --- 295,298 ---- Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.17 retrieving revision 1.18 diff -C2 -r1.17 -r1.18 *** main.c 2002/01/10 12:39:31 1.17 --- main.c 2002/01/14 12:05:08 1.18 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-07 16:08:23 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-11 18:55:28 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 156,160 **** (*comp_page)->free_offset = 0; - (*comp_page)->number_of_pages = 0; (*comp_page)->page = page; --- 156,159 ---- *************** *** 182,186 **** min_num_comp_pages = 0; ! printk("Starting compressed cache v0.21pre5 (%lu pages = %luk)\n", max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* initialize our data for the `test' compressed_page */ --- 181,185 ---- min_num_comp_pages = 0; ! 
printk("Starting compressed cache v0.21pre6 (%lu pages = %luk)\n", max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* initialize our data for the `test' compressed_page */ Index: swapin.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v retrieving revision 1.13 retrieving revision 1.14 diff -C2 -r1.13 -r1.14 *** swapin.c 2002/01/10 12:39:31 1.13 --- swapin.c 2002/01/14 12:05:08 1.14 *************** *** 2,6 **** * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-08 11:27:02 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-14 08:31:09 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 25,35 **** if (!comp_page || !comp_page->page) BUG(); - if (!PageCompCache(comp_page->page)) ! PAGE_BUG(comp_page->page); ! ! if (!PageLocked(comp_page->page)) ! PAGE_BUG(comp_page->page); ! if (!PageLocked(uncompressed_page)) BUG(); --- 25,30 ---- if (!comp_page || !comp_page->page) BUG(); if (!PageCompCache(comp_page->page)) ! BUG(); if (!PageLocked(uncompressed_page)) BUG(); *************** *** 53,78 **** * needed. * ! * return value: the decompressed page in locked state * @fragment: fragment to be freed * @uncompressed_page: the page which will store the uncompressed data */ ! static inline struct page * ! decompress_and_free_fragment(comp_cache_fragment_t * fragment, struct page * uncompressed_page) { comp_cache_t * comp_page = fragment->comp_page; ! if (!PageLocked(uncompressed_page)) ! BUG(); ! decompress_page(fragment, uncompressed_page); comp_cache_free(fragment); ! ! /* let's fix the virtual swap entry since we are swapping in */ ! swapin_vswap(comp_page, fragment); ! ! return uncompressed_page; } - #define real_swap_address_was_assigned() (real_entry.val != entry->val) - /** * lookup_comp_cache - looks up for this swap entry in swap cache and --- 48,68 ---- * needed. * ! * return value: nothing * @fragment: fragment to be freed * @uncompressed_page: the page which will store the uncompressed data */ ! static inline void ! decompress_and_free_fragment(comp_cache_fragment_t * fragment, struct page * page) { comp_cache_t * comp_page = fragment->comp_page; ! if (!PageLocked(page)) ! BUG(); ! decompress_page(fragment, page); comp_cache_free(fragment); ! swapin_vswap(comp_page, fragment); ! comp_cache_update_faultin_stats(); } /** * lookup_comp_cache - looks up for this swap entry in swap cache and *************** *** 84,141 **** lookup_comp_cache(swp_entry_t entry) { ! struct page * page, * new_page = NULL; comp_cache_t * comp_page = NULL; comp_cache_fragment_t * fragment = NULL; - unsigned long offset; - - offset = SWP_OFFSET(entry); try_swap_cache: /* it might have been compressed but not yet freed */ ! page = lookup_swap_cache(entry); ! ! comp_page = find_and_lock_comp_page(entry, &fragment); /* ok, found in swap cache */ ! if (page) goto found_swap_cache; - - /* we might have slept in find_and_lock above */ - page = lookup_swap_cache(entry); - - if (page) - goto found_swap_cache; ! if (!comp_page) ! goto out; ! ! if (!comp_page->page) ! BUG(); ! ! /* sanity check */ ! if (!PageCompCache(comp_page->page)) ! PAGE_BUG(comp_page->page); ! ! if (!new_page) { ! UnlockPage(comp_page->page); ! ! /* allocate page for decompression */ ! new_page = alloc_page(GFP_HIGHUSER); ! /* we may sleep to allocate the page above, so let us ! * see if somebody has not swapped in this page */ goto try_swap_cache; } ! 
if (TryLockPage(new_page)) BUG(); ! page = decompress_and_free_fragment(fragment, new_page); ! ! /* fragment freed, should never happen */ ! if (!page) BUG(); ! ! comp_cache_update_faultin_stats(); if (add_to_swap_cache(page, entry)) --- 74,113 ---- lookup_comp_cache(swp_entry_t entry) { ! struct page * swap_cache_page, * page = NULL; comp_cache_t * comp_page = NULL; comp_cache_fragment_t * fragment = NULL; try_swap_cache: /* it might have been compressed but not yet freed */ ! swap_cache_page = lookup_swap_cache(entry); ! comp_page = find_comp_page(entry, &fragment); /* ok, found in swap cache */ ! if (swap_cache_page) goto found_swap_cache; ! if (!comp_page) { ! if (vswap_address(entry)) ! BUG(); ! return NULL; ! } ! /* has the page already been allocated? if it's not, let's ! * allocate and try again since we may sleep */ ! if (!page) { ! page = alloc_page(GFP_HIGHUSER); goto try_swap_cache; } ! if (TryLockPage(page)) BUG(); ! /* sanity check */ ! if (!comp_page->page) BUG(); ! if (!PageCompCache(comp_page->page)) ! BUG(); ! ! decompress_and_free_fragment(fragment, page); if (add_to_swap_cache(page, entry)) *************** *** 144,153 **** set_page_dirty(page); - DEBUG_CHECK_COUNT; - - new_page = NULL; - UnlockPage(page); ! goto out; found_swap_cache: --- 116,122 ---- set_page_dirty(page); UnlockPage(page); ! ! return page; found_swap_cache: *************** *** 161,178 **** * gotta set the page dirty bit back to make it to be * compressed if needed. */ ! set_page_dirty(page); ! comp_cache_free(fragment); } ! out: ! /* let's free the page which has been allocated but not used */ ! if (new_page) ! page_cache_release(new_page); ! ! if (vswap_address(entry) && !page) ! BUG(); ! return page; } --- 130,142 ---- * gotta set the page dirty bit back to make it to be * compressed if needed. */ ! set_page_dirty(swap_cache_page); ! comp_cache_free(fragment); } ! if (page) ! page_cache_release(page); ! return swap_cache_page; } *************** *** 206,217 **** /* fall through */ try_comp_cache: ! comp_page = find_and_lock_comp_page(*entry, &fragment); - /* check the page_cache again, in case we stalled above. */ - page = __find_get_page(mapping, idx, page_hash(mapping, idx)); - - if (page) - goto found_swap_cache; - if (!comp_page) { if (new_page) { --- 170,175 ---- /* fall through */ try_comp_cache: ! comp_page = find_comp_page(*entry, &fragment); if (!comp_page) { if (new_page) { *************** *** 226,229 **** --- 184,190 ---- BUG(); + if (TryLockPage(comp_page->page)) + BUG(); + /* sanity check */ if (!PageCompCache(comp_page->page)) *************** *** 242,246 **** BUG(); ! page = decompress_and_free_fragment(fragment, new_page); comp_cache_update_faultin_stats(); --- 203,208 ---- BUG(); ! decompress_and_free_fragment(fragment, new_page); ! page = new_page; comp_cache_update_faultin_stats(); *************** *** 256,261 **** found_swap_cache: ! if (!comp_page && find_nolock_comp_page(*entry, &fragment)) ! comp_page = find_and_lock_comp_page(*entry, &fragment); if (comp_page) { --- 218,223 ---- found_swap_cache: ! if (!comp_page && find_comp_page(*entry, &fragment)) ! comp_page = find_comp_page(*entry, &fragment); if (comp_page) { Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.16 retrieving revision 1.17 diff -C2 -r1.16 -r1.17 *** swapout.c 2002/01/10 12:39:31 1.16 --- swapout.c 2002/01/14 12:05:08 1.17 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! 
* Time-stamp: <2002-01-09 19:05:54 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-13 18:21:19 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 23,32 **** struct list_head swp_free_buffer_head, swp_used_buffer_head; - #define swp_buffer_freed(swp_buffer) \ - (!PageLocked(swp_buffer->page) && list_empty(&swp_buffer->free_list)) - - #define swp_buffer_used(swp_buffer) \ - (page_count(swp_buffer->page) > 2 + !!swp_buffer->page->buffers) - /** * - find_free_swp_buffer - gets a swap buffer page. If there's a --- 23,26 ---- *************** *** 37,41 **** find_free_swp_buffer(comp_cache_fragment_t * fragment) { ! struct page * buffer_page, * page; struct list_head * swp_buffer_lh, * tmp_lh; struct swp_buffer * swp_buffer; --- 31,35 ---- find_free_swp_buffer(comp_cache_fragment_t * fragment) { ! struct page * buffer_page; struct list_head * swp_buffer_lh, * tmp_lh; struct swp_buffer * swp_buffer; *************** *** 62,75 **** * in? so let's free only the fragment struct */ if (!CompFragmentIO(swp_buffer->fragment)) { ! kmem_cache_free(fragment_cachep, (swp_buffer->fragment)); goto out; } ! /* it's not swapped out, so let' free it */ ! page = swp_buffer->fragment->comp_page->page; ! ! if (TryLockPage(page)) ! BUG(); CompFragmentClearIO(swp_buffer->fragment); comp_cache_free(swp_buffer->fragment); --- 56,71 ---- * in? so let's free only the fragment struct */ if (!CompFragmentIO(swp_buffer->fragment)) { ! kmem_cache_free(fragment_cachep, (swp_buffer->fragment)); goto out; } ! /* in the case it is waiting for merge in ! * comp_cache_free(), we can't free it */ ! if (!swp_buffer->fragment->index) { ! CompFragmentClearIO(swp_buffer->fragment); ! goto out; ! } + /* it's not swapped out, so let' free it */ CompFragmentClearIO(swp_buffer->fragment); comp_cache_free(swp_buffer->fragment); *************** *** 169,174 **** struct page * page; swp_entry_t entry; ! ! maxscan = 10; next_fragment = &lru_queue; --- 165,170 ---- struct page * page; swp_entry_t entry; ! ! maxscan = SWAP_CLUSTER_MAX; next_fragment = &lru_queue; *************** *** 211,215 **** } ! extern void comp_cache_free_nohash(comp_cache_fragment_t *); /** --- 207,211 ---- } ! extern void add_fragment_vswap(comp_cache_fragment_t *); /** *************** *** 227,237 **** get_comp_cache_page(struct page * swap_cache_page, unsigned short compressed_size, comp_cache_fragment_t ** fragment_out) { - struct list_head * fragment_lh = NULL, * temp_lh; comp_cache_t * comp_page = NULL; ! comp_cache_fragment_t * fragment = NULL; ! swp_entry_t entry; unsigned short aux_comp_size; int maxscan, maxtry; - unsigned long offset; if (!swap_cache_page) --- 223,231 ---- get_comp_cache_page(struct page * swap_cache_page, unsigned short compressed_size, comp_cache_fragment_t ** fragment_out) { comp_cache_t * comp_page = NULL; ! comp_cache_fragment_t * fragment = NULL, * previous_fragment = NULL; ! 
struct list_head * fragment_lh; unsigned short aux_comp_size; int maxscan, maxtry; if (!swap_cache_page) *************** *** 286,294 **** BUG(); - /* this entry is not used any longer, so we can free - * it for our use */ - if (!comp_page->number_of_pages) - goto reset; - goto check_references; } --- 280,283 ---- *************** *** 299,318 **** panic("no space in compressed cache whatsoever!\n"); - reset: - /* let's get rid of any fragment left */ - for_each_fragment_safe(fragment_lh, temp_lh, comp_page) { - fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); - - list_del(fragment_lh); - remove_fragment_from_hash_table(fragment); - remove_fragment_from_lru_queue(fragment); - kmem_cache_free(fragment_cachep, (fragment)); - } - - if (!list_empty(&(comp_page->fragments))) - BUG(); - - goto check_references; - new_page: /* remove from free space hash table before update */ --- 288,291 ---- *************** *** 365,398 **** fragment->comp_page = comp_page; - //check_all_fragments(comp_page); - /* let's update some important fields */ comp_page->free_space -= compressed_size; comp_page->free_offset += compressed_size; - comp_page->number_of_pages++; last_page_size[last_page] = compressed_size; last_page = (last_page++)%NUM_MEAN_PAGES; - - entry.val = fragment->index; - if (vswap_address(entry)) { - offset = SWP_OFFSET(entry); ! if (reserved(offset)) ! estimated_pages++; ! ! estimated_free_space -= compressed_size; ! vswap_address[offset]->fragment = fragment; ! if (!list_empty(&(vswap_address[offset]->list))) ! BUG(); ! list_add(&(vswap_address[offset]->list), &vswap_address_used_head); } ! /* add the fragment to the comp_page list of fragments */ ! list_add(&(fragment->list), &(comp_page->fragments)); /* only real swap adressed fragments are added to lru queue */ add_fragment_to_hash_table(fragment); --- 338,378 ---- fragment->comp_page = comp_page; /* let's update some important fields */ comp_page->free_space -= compressed_size; comp_page->free_offset += compressed_size; last_page_size[last_page] = compressed_size; last_page = (last_page++)%NUM_MEAN_PAGES; ! add_fragment_vswap(fragment); ! ! /* add the fragment to the comp_page list of fragments */ ! previous_fragment = list_entry(comp_page->fragments.prev, comp_cache_fragment_t, list); ! if (previous_fragment->offset + previous_fragment->compressed_size == fragment->offset) { ! list_add_tail(&(fragment->list), &(comp_page->fragments)); ! goto out; ! } ! /* let's search for the correct place in the comp_page list */ ! previous_fragment = NULL; ! ! for_each_fragment(fragment_lh, comp_page) { ! comp_cache_fragment_t * aux_fragment; ! aux_fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); ! ! if (aux_fragment->offset + aux_fragment->compressed_size > fragment->offset) ! break; ! ! previous_fragment = aux_fragment; } ! if (previous_fragment) ! list_add(&(fragment->list), &(previous_fragment->list)); ! else ! list_add(&(fragment->list), &(comp_page->fragments)); + out: /* only real swap adressed fragments are added to lru queue */ add_fragment_to_hash_table(fragment); Index: vswap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v retrieving revision 1.15 retrieving revision 1.16 diff -C2 -r1.15 -r1.16 *** vswap.c 2002/01/10 12:39:31 1.15 --- vswap.c 2002/01/14 12:05:08 1.16 *************** *** 2,6 **** * linux/mm/comp_cache/vswap.c * ! 
* Time-stamp: <2002-01-08 11:20:31 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-01-13 19:08:24 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 68,96 **** } - inline void - lock_vswap(swp_entry_t entry) { - unsigned long offset; - - if (!vswap_address(entry)) - return; - - offset = SWP_OFFSET(entry); - - down(&vswap_address[offset]->sem); - } - - inline void - unlock_vswap(swp_entry_t entry) { - unsigned long offset; - - if (!vswap_address(entry)) - return; - - offset = SWP_OFFSET(entry); - - up(&vswap_address[offset]->sem); - } - - swp_entry_t get_virtual_swap_page(struct page * page, unsigned short count) --- 68,71 ---- *************** *** 124,128 **** vswap_address[offset]->count = count; - vswap_address[offset]->real_entry.val = 0; vswap_address[offset]->fragment = VSWAP_RESERVED; vswap_address[offset]->pte_list = NULL; --- 99,102 ---- *************** *** 189,195 **** comp_page = fragment->comp_page; - if (TryLockPage(comp_page->page)) - BUG(); - comp_cache_free(fragment); --- 163,166 ---- *************** *** 207,211 **** vswap->count = 0; - vswap->real_entry.val = 0; vswap->fragment = NULL; vswap->pte_list = NULL; --- 178,181 ---- *************** *** 235,239 **** inline void ! remove_vswap(comp_cache_t * comp_page, comp_cache_fragment_t * fragment) { swp_entry_t entry; --- 205,209 ---- inline void ! remove_fragment_vswap(comp_cache_fragment_t * fragment) { swp_entry_t entry; *************** *** 246,251 **** return; - estimated_free_space += fragment->compressed_size; - /* it's ok fragment == NULL since the vswap may have been * freed in swp_free, but the fragment was only set to Freed */ --- 216,219 ---- *************** *** 259,262 **** --- 227,259 ---- * address */ list_del_init(&(vswap_address[offset]->list)); + + estimated_free_space += fragment->compressed_size; + } + + inline void + add_fragment_vswap(comp_cache_fragment_t * fragment) + { + swp_entry_t entry; + unsigned long offset; + + entry.val = fragment->index; + offset = SWP_OFFSET(entry); + + if (!vswap_address(entry)) + return; + + offset = SWP_OFFSET(entry); + + if (reserved(offset)) + estimated_pages++; + + vswap_address[offset]->fragment = fragment; + + if (!list_empty(&(vswap_address[offset]->list))) + BUG(); + + list_add(&(vswap_address[offset]->list), &vswap_address_used_head); + + estimated_free_space -= fragment->compressed_size; } *************** *** 399,409 **** vswap_address[i]->offset = i; - vswap_address[i]->real_entry.val = 0; vswap_address[i]->pte_list = NULL; vswap_address[i]->swap_cache_page = NULL; vswap_address[i]->fragment = NULL; - init_MUTEX(&vswap_address[i]->sem); - list_add(&(vswap_address[i]->list), &vswap_address_free_head); } --- 396,403 ---- |
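
A note on the vm_enough_memory() change above: comp_cache_free_space() turns estimated_free_space (bytes) into a page count and doubles it, presumably assuming an average 2:1 compression ratio. The sketch below is a minimal userspace model of that arithmetic; vm_enough_memory_sketch() and the example numbers are stand-ins of mine, not kernel code.

#include <stdio.h>

#define PAGE_SHIFT 12

/* bytes of free space estimated in the compressed cache (example value) */
static unsigned long estimated_free_space = 24 << PAGE_SHIFT;

/* mirrors comp_cache_free_space() from aux.c; the 2x factor presumably
 * assumes an average 2:1 compression ratio */
static int comp_cache_free_space(void)
{
	return 2 * (estimated_free_space >> PAGE_SHIFT);
}

/* hypothetical caller modelled on the vm_enough_memory() change: count the
 * pages the compressed cache can still absorb as free memory */
static int vm_enough_memory_sketch(long free_pages, long pages_wanted)
{
	free_pages += comp_cache_free_space();
	return free_pages > pages_wanted;
}

int main(void)
{
	/* 10 free pages alone would fail, but the cache can take 48 more */
	printf("allocation granted? %d\n", vm_enough_memory_sketch(10, 40));
	return 0;
}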
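
The new comp_cache_free() policy can be modelled in a few lines: instead of memmove()ing live data around as move_and_fix_fragments() used to, a freed fragment is marked free in place (index = 0) and coalesced with any free neighbour, mirroring merge_right_neighbour() and merge_left_neighbour(). This is a simplified userspace sketch with stand-in types, not the kernel structures:

#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for comp_cache_fragment_t (not the kernel struct);
 * the list is kept sorted by offset, so prev/next are the neighbours */
struct fragment {
	unsigned long index;		/* 0 marks a free hole awaiting merge */
	unsigned short offset, size;
	struct fragment *prev, *next;
};

/* absorb a free right neighbour into f, like merge_right_neighbour() */
static void merge_right(struct fragment *f)
{
	struct fragment *r = f->next;

	if (!r || r->index)
		return;
	f->size += r->size;
	f->next = r->next;
	if (r->next)
		r->next->prev = f;
	free(r);
}

/* absorb a free left neighbour into f, like merge_left_neighbour() */
static void merge_left(struct fragment *f)
{
	struct fragment *l = f->prev;

	if (!l || l->index)
		return;
	f->offset = l->offset;
	f->size += l->size;
	f->prev = l->prev;
	if (l->prev)
		l->prev->next = f;
	free(l);
}

/* the new policy: no memmove() of live data, just mark free and coalesce */
static void free_fragment(struct fragment *f)
{
	f->index = 0;
	merge_right(f);
	merge_left(f);
}

static struct fragment *make(unsigned long index, unsigned short offset,
			     unsigned short size, struct fragment *prev)
{
	struct fragment *f = malloc(sizeof(*f));

	f->index = index;
	f->offset = offset;
	f->size = size;
	f->prev = prev;
	f->next = NULL;
	if (prev)
		prev->next = f;
	return f;
}

int main(void)
{
	/* three adjacent used fragments: [0..100) [100..260) [260..400) */
	struct fragment *a = make(1, 0, 100, NULL);
	struct fragment *b = make(2, 100, 160, a);
	struct fragment *c = make(3, 260, 140, b);

	free_fragment(b);	/* leaves a 160-byte hole in place */
	free_fragment(c);	/* merges left into it: one 300-byte hole */
	printf("hole at offset %u, size %u\n", c->offset, c->size);
	free(a);
	free(c);
	return 0;
}

The point of the design is that a hole only ever grows by merging: live fragments never move, so their offsets stay valid.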
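
Keeping the fragments list sorted is what makes the neighbour lookups above cheap. Here is a sketch of the insertion logic in get_comp_cache_page(): try the common tail-append case first (a plain list_add_tail() in the kernel code), and fall back to a linear search otherwise. Again a userspace model; head/tail pointers stand in for the kernel's struct list_head:

#include <stdio.h>
#include <stdlib.h>

/* simplified fragment; the kernel threads these on a struct list_head */
struct fragment {
	unsigned short offset, size;
	struct fragment *prev, *next;
};

static struct fragment *head, *tail;

static void insert_sorted(struct fragment *f)
{
	struct fragment *pos;

	/* fast path: empty list, or f starts right where the tail ends */
	if (!head || tail->offset + tail->size == f->offset) {
		f->prev = tail;
		f->next = NULL;
		if (tail)
			tail->next = f;
		else
			head = f;
		tail = f;
		return;
	}

	/* slow path: insert before the first fragment ending past f->offset */
	for (pos = head; pos; pos = pos->next)
		if (pos->offset + pos->size > f->offset)
			break;

	f->next = pos;
	f->prev = pos ? pos->prev : tail;
	if (f->prev)
		f->prev->next = f;
	else
		head = f;
	if (pos)
		pos->prev = f;
	else
		tail = f;
}

static struct fragment *make(unsigned short offset, unsigned short size)
{
	struct fragment *f = malloc(sizeof(*f));

	f->offset = offset;
	f->size = size;
	f->prev = f->next = NULL;
	return f;
}

int main(void)
{
	struct fragment *f;

	insert_sorted(make(0, 100));	/* fast path: empty list */
	insert_sorted(make(100, 60));	/* fast path: plain tail append */
	insert_sorted(make(300, 40));	/* slow path: leaves a gap */
	insert_sorted(make(200, 80));	/* slow path: lands in the gap */

	for (f = head; f; f = f->next)
		printf("[%u..%u) ", f->offset, f->offset + f->size);
	printf("\n");
	return 0;
}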
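
Finally, the lookup pattern that replaced find_and_lock_comp_page(): find the comp_page without locking, then take the lock non-blockingly so the path never sleeps. The analogy below uses pthread_mutex_trylock(); the kernel code uses TryLockPage() on the page lock (and several callers BUG() if it is already held):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for find_comp_page(): lookup only, no locking, never sleeps */
static int find_comp_page(void)
{
	return 1;	/* pretend the entry was found */
}

int main(void)
{
	if (!find_comp_page())
		return 0;

	/* take the lock "at once", TryLockPage()-style: fail instead of
	 * sleeping if somebody else holds it */
	if (pthread_mutex_trylock(&page_lock) != 0) {
		puts("page busy: handle contention, don't sleep");
		return 0;
	}

	puts("page locked without sleeping");
	pthread_mutex_unlock(&page_lock);
	return 0;
}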