[lc-checkins] CVS: linux/mm/comp_cache free.c,1.17,1.18 swapin.c,1.15,1.16 swapout.c,1.18,1.19 vswap
From: Rodrigo S. de C. <rc...@us...> - 2002-01-24 22:05:09
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv7824/mm/comp_cache

Modified Files:
	free.c swapin.c swapout.c vswap.c
Log Message:
This patch includes changes regarding TLB and cache flushing when setting
ptes. It also fixes a stupid bug in vswap.

- comp_cache_release() split into comp_cache_release() and
  comp_cache_use_address().

- comp_cache_use_address() now searches for the vma of the pte we are going
  to change (see the sketch right after this log message). If it is found,
  we flush the cache and TLB for this pte before setting it. If it is not
  found, we account for this reference in the swap count, and when this pte
  faults in or gets freed, we fix the counters.

- so we have to handle the cases in lookup_comp_cache() in which the pte
  gets changed while we sleep to get a new page. In this case, we check
  whether the pte has changed. If it has, wow, it's time to return and wait
  for it to fault in again, now with the real address.

- to flush, some code was copied from the rmap patch by Rik van Riel. He
  has the same problem and devised a nice way to decrease the complexity of
  looking up the mm struct.

- the real_entry field was added back to the vswap_address struct.

- fix a _stupid_ bug in vswap. estimated_pages is a variable that prevents
  us from assigning a huge number of vswaps which are reserved (a vswap
  which is used but does not have a fragment, so we don't know how big the
  fragment will be). This variable was supposed to take negative values,
  _but_ it was declared as unsigned. :-(

- (in the meantime I tried to add a reference to the swap count for our
  compressed fragment, but it turned out not to perform as well as the old
  code. I intended to remove the find_comp_page() and comp_cache_free()
  calls from comp_cache_release(), but they take less time to execute than
  the more general approach, so I reverted the changes.)
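
A rough sketch of that pte walk (condensed from the free.c hunk below; the
helper name set_real_entry_sketch() is made up, ptep_to_mm()/ptep_to_address()
are the rmap-patch helpers mentioned above, and locking plus the old-entry
bookkeeping are left out):

/* sketch only -- not the committed code */
static void set_real_entry_sketch(struct pte_list * pte_list, swp_entry_t entry)
{
	while (pte_list) {
		struct pte_list * next_pte_list = pte_list->next;
		struct mm_struct * mm = ptep_to_mm(pte_list->ptep);
		unsigned long address = ptep_to_address(pte_list->ptep);
		struct vm_area_struct * vma = find_vma(mm, address);

		/* one swap reference per pte that used to point at the
		 * virtual entry */
		swap_duplicate(entry);

		if (!vma) {
			/* no vma found: leave the pte alone; the extra swap
			 * count is fixed up when the pte faults in or gets
			 * freed */
			pte_list = next_pte_list;
			continue;
		}

		remove_pte_vswap(pte_list->ptep);

		/* flush before making the new entry visible */
		ptep_get_and_clear(pte_list->ptep);
		flush_tlb_page(vma, address);
		flush_cache_page(vma, address);
		set_pte(pte_list->ptep, swp_entry_to_pte(entry));

		pte_list = next_pte_list;
	}
}
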
Index: free.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v
retrieving revision 1.17
retrieving revision 1.18
diff -C2 -r1.17 -r1.18
*** free.c	2002/01/16 16:30:12	1.17
--- free.c	2002/01/24 22:05:03	1.18
***************
*** 2,6 ****
  * linux/mm/comp_cache/free.c
  *
! * Time-stamp: <2002-01-14 16:13:12 rcastro>
  *
  * Linux Virtual Memory Compressed Cache
--- 2,6 ----
  * linux/mm/comp_cache/free.c
  *
! * Time-stamp: <2002-01-24 19:49:39 rcastro>
  *
  * Linux Virtual Memory Compressed Cache
***************
*** 20,23 ****
--- 20,24 ----
  
  extern void remove_fragment_vswap(comp_cache_fragment_t *);
+ extern void add_fragment_vswap(comp_cache_fragment_t *);
  
  /* is fragment1 the left neighbour of fragment2? */
***************
*** 67,71 ****
  	remove_fragment_vswap(fragment);
  	remove_fragment_from_hash_table(fragment);
! 	remove_fragment_from_lru_queue(fragment);
  
  }
--- 68,72 ----
  	remove_fragment_vswap(fragment);
  	remove_fragment_from_hash_table(fragment);
! 	remove_fragment_from_lru_queue(fragment);
  
  }
***************
*** 140,144 ****
  	/* remove the fragment from comp page */
  	list_del(&(fragment->list));
! 
  	/* careful: that's not only the compressed size from this
  	 * fragment, but also the fragments that might have been
--- 141,145 ----
  	/* remove the fragment from comp page */
  	list_del(&(fragment->list));
! 
  	/* careful: that's not only the compressed size from this
  	 * fragment, but also the fragments that might have been
***************
*** 180,203 ****
  }
  
! 
! int
  comp_cache_release(swp_entry_t entry)
  {
  	comp_cache_t * comp_page;
  	comp_cache_fragment_t * fragment = NULL;
- 	struct vswap_address * vswap;
- 	struct list_head * vswap_lh;
- 	struct pte_list * pte_list, * next_pte_list = NULL;
- 	swp_entry_t old_entry;
  
- 	/* let's check if there is any compressed page set to this
- 	 * entry and free it at once if possible. Otherwise, we will
- 	 * set the Freed bit, which will make it possible to be freed
- 	 * later */
  	comp_page = find_comp_page(entry, &fragment);
  
! 	if (comp_page)
  		comp_cache_free(fragment);
  
  	/* no virtual swap entry with a compressed page */
  	if (list_empty(&vswap_address_used_head))
--- 181,211 ----
  }
  
! inline void
  comp_cache_release(swp_entry_t entry)
  {
  	comp_cache_t * comp_page;
  	comp_cache_fragment_t * fragment = NULL;
  
  	comp_page = find_comp_page(entry, &fragment);
  
! 	if (comp_page)
  		comp_cache_free(fragment);
+ }
+ 
+ extern atomic_t estimated_pages;
+ int
+ comp_cache_use_address(swp_entry_t entry)
+ {
+ 	comp_cache_fragment_t * fragment = NULL;
+ 	struct mm_struct * mm;
+ 	unsigned long address;
+ 	struct vm_area_struct * vma;
+ 	pte_t pte;
+ 	struct vswap_address * vswap;
+ 	struct list_head * vswap_lh;
+ 	struct pte_list * pte_list, * next_pte_list = NULL;
+ 	swp_entry_t old_entry;
+ 
  	/* no virtual swap entry with a compressed page */
  	if (list_empty(&vswap_address_used_head))
***************
*** 237,266 ****
  		return 0;
  
- 	/* remove from used list, since we won't have a virtual
- 	 * addresses compressed page any longer */
- 	list_del_init(vswap_lh);
- 
  	fragment = vswap->fragment;
- 	comp_page = fragment->comp_page;
  	remove_fragment_from_hash_table(fragment);
  
- 	fragment->index = entry.val;
- 
- 	estimated_free_space += fragment->compressed_size;
- 
- 	add_fragment_to_hash_table(fragment);
- 	add_fragment_to_lru_queue(fragment);
- 
- 	vswap->fragment = VSWAP_RESERVED;
- 
- 	/* old_virtual_addressed_pte <- new real swap entry */
- 	pte_list = vswap->pte_list;
- 
  	/* let's fix swap cache page address (if any) */
  	if (vswap->swap_cache_page) {
! 		struct page * swap_cache_page;
! 
! 		swap_cache_page = vswap->swap_cache_page;
  
  		if (!PageLocked(swap_cache_page))
--- 245,256 ----
  		return 0;
  
  	fragment = vswap->fragment;
+ 	remove_fragment_vswap(fragment);
  	remove_fragment_from_hash_table(fragment);
  
  	/* let's fix swap cache page address (if any) */
  	if (vswap->swap_cache_page) {
! 		struct page * swap_cache_page = vswap->swap_cache_page;
  
  		if (!PageLocked(swap_cache_page))
***************
*** 268,316 ****
  		page_cache_get(swap_cache_page);
  
- 		comp_cache_swp_duplicate(old_entry);
  		delete_from_swap_cache(swap_cache_page);
- 		comp_cache_swp_free_generic(old_entry, 0);
- 
- 		//swap_duplicate(entry);
  		if (add_to_swap_cache(swap_cache_page, entry))
  			BUG();
! 
  		page_cache_release(swap_cache_page);
  		UnlockPage(swap_cache_page);
  	}
  
  	while (pte_list) {
  		next_pte_list = pte_list->next;
- 
- 		remove_pte_vswap(pte_list->ptep);
  
! 		ptep_get_and_clear(pte_list->ptep);
! 		set_pte(pte_list->ptep, swp_entry_to_pte(entry));
  
  		swap_duplicate(entry);
! 		comp_cache_swp_free_generic(old_entry, 0);
! 
  		pte_list = next_pte_list;
! 	}
! 
! 	/* the virtual swap entry is supposed to be freed now (ie,
! 	 * pte_list = NULL, swap_cache_page = NULL and added back to
! 	 * free list) */
! 	if (vswap->pte_list)
! 		BUG();
! 	if (vswap->swap_cache_page)
! 		BUG();
! 	if (vswap->count) {
! 		swp_entry_t entry = SWP_ENTRY(31,vswap->offset);
! 
! 		printk("entry: %08lx\n", entry.val);
! 		BUG();
  	}
! 	UnlockPage(comp_page->page);
  	return 1;
  }
--- 258,310 ----
  		page_cache_get(swap_cache_page);
  
+ 		comp_cache_swp_duplicate(old_entry);
  		delete_from_swap_cache(swap_cache_page);
  
  		if (add_to_swap_cache(swap_cache_page, entry))
  			BUG();
! 
! 		comp_cache_swp_free_generic(old_entry, 0);
  		page_cache_release(swap_cache_page);
  		UnlockPage(swap_cache_page);
  	}
  
+ 	/* old_virtual_addressed_pte <- new real swap entry */
+ 	pte_list = vswap->pte_list;
+ 
  	while (pte_list) {
  		next_pte_list = pte_list->next;
  
! 		mm = ptep_to_mm(pte_list->ptep);
! 		address = ptep_to_address(pte_list->ptep);
! 		vma = find_vma(mm, address);
! 
  		swap_duplicate(entry);
+ 
+ 		if (!vma)
+ 			goto next;
+ 
+ 		remove_pte_vswap(pte_list->ptep);
  
! 		pte = ptep_get_and_clear(pte_list->ptep);
! 		flush_tlb_page(vma, address);
! 		flush_cache_page(vma, address);
! 		set_pte(pte_list->ptep, swp_entry_to_pte(entry));
! 		comp_cache_swp_free_generic(old_entry, 0);
! 	next:
! 		pte_list = next_pte_list;
  	}
  
! 	fragment->index = entry.val;
! 	add_fragment_to_lru_queue(fragment);
  
+ 	if (vswap->pte_list)
+ 		vswap->real_entry.val = entry.val;
+ 
+ 	add_fragment_to_hash_table(fragment);
+ 
  	UnlockPage(fragment->comp_page->page);
  	return 1;
  }
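
For reference, the swap-cache fix-up in the hunk above boils down to the
usual 2.4 re-keying sequence. Condensed (hypothetical helper name, the
PageLocked() check and the vswap bookkeeping trimmed):

/* sketch only: re-key a swap cache page from the old virtual entry to
 * the newly assigned real entry, as comp_cache_use_address() does above */
static void rekey_swap_cache_sketch(struct page * page,
				    swp_entry_t old_entry, swp_entry_t entry)
{
	/* caller holds the page lock */
	page_cache_get(page);			/* keep the page across the move */

	comp_cache_swp_duplicate(old_entry);	/* hold the old entry meanwhile */
	delete_from_swap_cache(page);

	if (add_to_swap_cache(page, entry))	/* the new entry must be unused */
		BUG();

	comp_cache_swp_free_generic(old_entry, 0);
	page_cache_release(page);
	UnlockPage(page);
}
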
Index: swapin.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v
retrieving revision 1.15
retrieving revision 1.16
diff -C2 -r1.15 -r1.16
*** swapin.c	2002/01/16 16:30:12	1.15
--- swapin.c	2002/01/24 22:05:04	1.16
***************
*** 2,6 ****
  * linux/mm/comp_cache/swapin.c
  *
! * Time-stamp: <2002-01-14 16:59:13 rcastro>
  *
  * Linux Virtual Memory Compressed Cache
--- 2,6 ----
  * linux/mm/comp_cache/swapin.c
  *
! * Time-stamp: <2002-01-24 11:39:11 rcastro>
  *
  * Linux Virtual Memory Compressed Cache
***************
*** 85,101 ****
  	comp_cache_fragment_t * fragment = NULL;
  
! try_swap_cache:
  	/* it might have been compressed but not yet freed */
  	swap_cache_page = lookup_swap_cache(entry);
  	comp_page = find_comp_page(entry, &fragment);
! 
  	/* ok, found in swap cache */
  	if (swap_cache_page)
  		goto found_swap_cache;
! 
  	if (!comp_page) {
! 		if (vswap_address(entry))
  			BUG();
! 		return NULL;
  	}
--- 85,103 ----
  	comp_cache_fragment_t * fragment = NULL;
  
! try_swap_cache:
  	/* it might have been compressed but not yet freed */
  	swap_cache_page = lookup_swap_cache(entry);
  	comp_page = find_comp_page(entry, &fragment);
! 
  	/* ok, found in swap cache */
  	if (swap_cache_page)
  		goto found_swap_cache;
! 
! 	/* it will happen with vswap address only if the vswap address
! 	 * had a real address assigned */
  	if (!comp_page) {
! 		if (vswap_address(entry) && !vswap_address[SWP_OFFSET(entry)]->real_entry.val)
  			BUG();
! 		goto out;
  	}
***************
*** 124,128 ****
  	UnlockPage(page);
  
- 
  	return page;
  
--- 126,129 ----
***************
*** 142,145 ****
--- 143,147 ----
  	}
  
+  out:
  	if (page)
  		page_cache_release(page);
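
The "pte changed while we slept" handling mentioned in the log is not itself
visible in the hunks above; the shape of that re-check (generic 2.4 pattern,
hypothetical function name and arguments, not the committed
lookup_comp_cache() code) is roughly:

/* sketch only: after a blocking allocation, make sure the pte still holds
 * the entry we started with; if comp_cache_use_address() has installed the
 * real swap entry meanwhile, back out and let the fault be retried with the
 * new (real) address */
static struct page * alloc_and_recheck_sketch(struct mm_struct * mm,
					      pte_t * page_table, pte_t orig_pte)
{
	struct page * page = alloc_page(GFP_HIGHUSER);	/* may sleep */

	if (!page)
		return NULL;

	spin_lock(&mm->page_table_lock);
	if (!pte_same(*page_table, orig_pte)) {
		/* pte changed under us: drop the page and bail out */
		spin_unlock(&mm->page_table_lock);
		page_cache_release(page);
		return NULL;
	}
	spin_unlock(&mm->page_table_lock);

	return page;
}
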
Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.18
retrieving revision 1.19
diff -C2 -r1.18 -r1.19
*** swapout.c	2002/01/16 16:30:12	1.18
--- swapout.c	2002/01/24 22:05:04	1.19
***************
*** 2,6 ****
  * /mm/comp_cache/swapout.c
  *
! * Time-stamp: <2002-01-14 15:39:40 rcastro>
  *
  * Linux Virtual Memory Compressed Cache
--- 2,6 ----
  * /mm/comp_cache/swapout.c
  *
! * Time-stamp: <2002-01-22 11:34:23 rcastro>
  *
  * Linux Virtual Memory Compressed Cache
***************
*** 195,199 ****
  	 * function below */
  	swap_duplicate(entry);
! 
  	swp_buffer = decompress_to_swp_buffer(fragment);
  
--- 195,199 ----
  	 * function below */
  	swap_duplicate(entry);
! 
  	swp_buffer = decompress_to_swp_buffer(fragment);
  
***************
*** 201,205 ****
  	if (!swp_buffer)
  		goto out;
! 
  	rw_swap_page(WRITE, swp_buffer->page);
  
--- 201,205 ----
  	if (!swp_buffer)
  		goto out;
! 
  	rw_swap_page(WRITE, swp_buffer->page);
  
***************
*** 339,343 ****
  	fragment->flags = 0;
  	fragment->comp_page = comp_page;
! 
  	/* let's update some important fields */
  	comp_page->free_space -= compressed_size;
--- 339,343 ----
  	fragment->flags = 0;
  	fragment->comp_page = comp_page;
! 
  	/* let's update some important fields */
  	comp_page->free_space -= compressed_size;
Index: vswap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v
retrieving revision 1.17
retrieving revision 1.18
diff -C2 -r1.17 -r1.18
*** vswap.c	2002/01/16 16:30:12	1.17
--- vswap.c	2002/01/24 22:05:04	1.18
***************
*** 2,6 ****
  * linux/mm/comp_cache/vswap.c
  *
! * Time-stamp: <2002-01-14 16:58:59 rcastro>
  *
  * Linux Virtual Memory Compressed Cache
--- 2,6 ----
  * linux/mm/comp_cache/vswap.c
  *
! * Time-stamp: <2002-01-24 19:27:40 rcastro>
  *
  * Linux Virtual Memory Compressed Cache
***************
*** 26,30 ****
  unsigned short * last_page_size;
  unsigned short last_page = 0;
! unsigned int estimated_pages;
  
  static kmem_cache_t * pte_cachep;
--- 26,30 ----
  unsigned short * last_page_size;
  unsigned short last_page = 0;
! long estimated_pages;
  
  static kmem_cache_t * pte_cachep;
***************
*** 50,59 ****
  int
  comp_cache_available_space(void)
  {
! 	int mean_size;
  
! 	mean_size = (unsigned short) (estimated_free_space/real_num_comp_pages);
! 	if (mean_size < 0 || mean_size > PAGE_SIZE) {
! 		printk("mean_size: %d\n", mean_size);
  		BUG();
  	}
--- 50,59 ----
  int
  comp_cache_available_space(void)
  {
! 	int available_mean_size;
  
! 	available_mean_size = (unsigned short) (estimated_free_space/real_num_comp_pages);
! 	if (available_mean_size < 0 || available_mean_size > PAGE_SIZE) {
! 		printk("available_mean_size: %d\n", available_mean_size);
  		BUG();
  	}
***************
*** 61,66 ****
  	if (!vswap_address_available())
  		return 0;
! 
! 	if (mean_size < comp_cache_mean_size())
  		return 0;
  
--- 61,68 ----
  	if (!vswap_address_available())
  		return 0;
! 
! 	/* it the available mean size in the comp cache bigger than
! 	 * the mean size of the last pages? if it's not, return 0 */
! 	if (available_mean_size < comp_cache_mean_size())
  		return 0;
  
***************
*** 84,89 ****
  		return entry;
  
- 	estimated_pages--;
- 
  	vswap = list_entry(vswap_address_free_head.next, struct vswap_address, list);
  	list_del_init(vswap_address_free_head.next);
--- 86,89 ----
***************
*** 99,106 ****
  	vswap_address[offset]->count = count;
  
- 	vswap_address[offset]->fragment = VSWAP_RESERVED;
  	vswap_address[offset]->pte_list = NULL;
! 	vswap_address[offset]->swap_cache_page = NULL;
  
  	entry = SWP_ENTRY(type, offset);
  
--- 99,109 ----
  	vswap_address[offset]->count = count;
  
  	vswap_address[offset]->pte_list = NULL;
! 	vswap_address[offset]->swap_cache_page = NULL;
! 	vswap_address[offset]->real_entry.val = 0;
+ 	vswap_address[offset]->fragment = VSWAP_RESERVED;
+ 	estimated_pages--;
+ 
  	entry = SWP_ENTRY(type, offset);
  
***************
*** 123,126 ****
--- 126,132 ----
  		BUG();
  
+ 	if (vswap_address[offset]->real_entry.val)
+ 		BUG();
+ 
  	vswap_address[offset]->count++;
  }
***************
*** 142,154 ****
  		BUG();
  
  	if (--vswap->count)
  		return vswap->count;
  
! 	/* do we have a compressed page for this virtual entry? in the
! 	 * case we do, let's free the entry. */
! 	if (reserved(offset)) {
! 		estimated_pages++;
  		goto out;
- 	}
  
  	if (!vswap->fragment)
--- 148,161 ----
  		BUG();
  
+ 	/* a real address has been assigned, but we couldn't set this
+ 	 * pte, so let's update the swap count for the real address */
+ 	if (vswap->real_entry.val)
+ 		swap_free(vswap->real_entry);
+ 
  	if (--vswap->count)
  		return vswap->count;
  
! 	if (reserved(offset))
  		goto out;
  
  	if (!vswap->fragment)
***************
*** 166,169 ****
--- 173,178 ----
   out:
+ 	estimated_pages++;
+ 
  	/* remove from the used virtual swap entries list. This list
  	 * contains only virtual swap addresses which have compressed
***************
*** 181,184 ****
--- 190,194 ----
  	vswap->pte_list = NULL;
  	vswap->swap_cache_page = NULL;
+ 	vswap->real_entry.val = 0;
  
  	if (!list_empty(&vswap->list))
***************
*** 220,223 ****
--- 230,234 ----
  	vswap_address[offset]->fragment = VSWAP_RESERVED;
+ 	estimated_pages--;
  
  	/* remove the virtual swap entry from the used list, since
***************
*** 227,231 ****
  	estimated_free_space += fragment->compressed_size;
  
- 	estimated_pages--;
  
  }
--- 238,241 ----
***************
*** 244,250 ****
  	offset = SWP_OFFSET(entry);
  
! 	if (reserved(offset))
! 		estimated_pages++;
! 
  	vswap_address[offset]->fragment = fragment;
  
--- 254,265 ----
  	offset = SWP_OFFSET(entry);
  
! 	if (!reserved(offset))
! 		BUG();
! 
! 	estimated_pages++;
! 
! 	if (estimated_pages > NUM_VSWAP_ENTRIES)
! 		BUG();
! 
  	vswap_address[offset]->fragment = fragment;
  
***************
*** 332,336 ****
  	entry.val = page->index;
  
! 
  	if (!vswap_address(entry))
  		return;
--- 347,351 ----
  	entry.val = page->index;
  
! 
  	if (!vswap_address(entry))
  		return;
***************
*** 381,384 ****
--- 396,400 ----
  		vswap_address[i]->swap_cache_page = NULL;
  		vswap_address[i]->fragment = NULL;
+ 		vswap_address[i]->real_entry.val = 0;
  
  		list_add(&(vswap_address[i]->list), &vswap_address_free_head);
***************
*** 386,390 ****
  	estimated_free_space = PAGE_SIZE * real_num_comp_pages;
! 	estimated_pages = (1.5 * real_num_comp_pages);
  
  	last_page_size = (unsigned short *) vmalloc(NUM_MEAN_PAGES * sizeof(unsigned short));
--- 402,406 ----
  	estimated_free_space = PAGE_SIZE * real_num_comp_pages;
! 	estimated_pages = 1.5 * real_num_comp_pages;
  
  	last_page_size = (unsigned short *) vmalloc(NUM_MEAN_PAGES * sizeof(unsigned short));
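
As a footnote to the estimated_pages fix above, a tiny userspace illustration
(plain C, not kernel code) of why a counter that must go negative cannot be
declared unsigned:

#include <stdio.h>

int main(void)
{
	unsigned int broken = 0;	/* how estimated_pages used to be declared */
	long fixed = 0;			/* how it is declared after this patch */

	broken--;	/* wraps around to 4294967295 instead of going negative */
	fixed--;	/* -1, as intended */

	printf("broken = %u, fixed = %ld\n", broken, fixed);
	return 0;
}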