linuxcompressed-checkins Mailing List for Linux Compressed Cache
From: Rodrigo S. de C. <rc...@us...> - 2002-02-07 21:07:30
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv13905/mm Added Files: filemap.c Log Message: This version features the adaptable vswap implementation. It is a first version and still has to be improved, but it seems stable. One pending improvement is handling many shrinks and grows in a row (which I believe is stable, but not completely functional). We also still have to deal with a possible find_vma() failure, which will cause a BUG() in the current code. - created vswap_alloc_and_init(), like init_comp_page(), but it allocates the page too. - {grow,shrink}_comp_cache() now also checks whether the vswap table needs to be shrunk or grown, using vswap_needs_to_{grow,shrink}(). - grow_vswap() is a very simple function: it allocates the new vswap table, copies the pointers to the existing vswap entries, allocates the new entries, and that's it. - shrink_vswap() is a complex function: it tries to compact all the used vswap entries, and only once that has been done (it may not complete at once, depending on the page locks) do we effectively shrink the cache. |
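A minimal sketch of the grow path described above, for illustration only: the function name grow_vswap_table() and the vmalloc()/vfree() choice are assumptions, not the checked-in code; vswap_alloc_and_init() is the helper this commit introduces.

    /* Grow the vswap pointer table: allocate a larger table, carry the
     * old entry pointers over, and allocate only the brand-new entries.
     * Hypothetical sketch -- the real code lives in mm/comp_cache/vswap.c. */
    static struct vswap_address ** grow_vswap_table(struct vswap_address ** old_table, unsigned long old_num, unsigned long new_num)
    {
            struct vswap_address ** new_table;
            unsigned long i;

            new_table = (struct vswap_address **) vmalloc(new_num * sizeof(struct vswap_address *));
            if (!new_table)
                    return NULL;

            /* existing entries stay where they are; only the pointers move */
            for (i = 0; i < old_num; i++)
                    new_table[i] = old_table[i];

            /* allocate and initialize the entries that did not exist before */
            for (i = old_num; i < new_num; i++)
                    vswap_alloc_and_init(new_table, i);

            vfree(old_table);
            return new_table;
    }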
From: Rodrigo S. de C. <rc...@us...> - 2002-02-07 21:07:29
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv13905/include/linux Modified Files: comp_cache.h Log Message: This version features the adaptable vswap implementation. It's the first version and have to be improved, but it seems stable though. One of the improvements is to deal with many shrink and grows in a row (what I think it's stable but not completely functional). And we still have to deal with a possible find_vma failure, what will cause a BUG() in the current code. - created vswap_alloc_and_init(), like init_comp_page(), but allocates the page too. - now {grow,shrink}_comp_cache() checks also if the vswap needs to be shrunk or grown, using vswap_needs_to_{grow,shrink}(). - grow_vswap() is a very simple function. It allocates the new vswap table, copies the pointer to the vswap entries, allocate the new vswap entries and that's it. - shrink_vswap() is a complex function. It tries to compact all the used vswap entries and only when we could do it (it may be done at once, depends on the page locks), we effectively shrink the cache. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.44 retrieving revision 1.45 diff -C2 -r1.44 -r1.45 *** comp_cache.h 31 Jan 2002 21:08:16 -0000 1.44 --- comp_cache.h 7 Feb 2002 21:07:24 -0000 1.45 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-31 18:07:29 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-02-06 16:52:24 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 28,37 **** #include <linux/WKcommon.h> ! #define COMP_CACHE_VERSION "0.22pre1" /* maximum compressed size of a page */ #define MAX_COMPRESSED_SIZE 4500 ! #define NUM_VSWAP_ENTRIES 3 * max_num_comp_pages extern unsigned long real_num_comp_pages, new_num_comp_pages, max_num_comp_pages; --- 28,37 ---- #include <linux/WKcommon.h> ! #define COMP_CACHE_VERSION "0.22pre2" /* maximum compressed size of a page */ #define MAX_COMPRESSED_SIZE 4500 ! #define NUM_VSWAP_ENTRIES (3 * real_num_comp_pages) extern unsigned long real_num_comp_pages, new_num_comp_pages, max_num_comp_pages; *************** *** 348,354 **** }; - extern struct list_head vswap_address_free_head, vswap_address_used_head; extern struct vswap_address ** vswap_address; extern unsigned long estimated_free_space; #define COMP_CACHE_SWP_TYPE MAX_SWAPFILES --- 348,361 ---- }; extern struct vswap_address ** vswap_address; + extern struct list_head vswap_address_free_head; + extern struct list_head vswap_address_used_head; + extern unsigned long estimated_free_space; + extern long estimated_pages; + + extern unsigned long vswap_current_num_entries; + extern unsigned long vswap_num_used_entries; + extern unsigned int vswap_last_used; #define COMP_CACHE_SWP_TYPE MAX_SWAPFILES *************** *** 356,360 **** #ifdef CONFIG_COMP_CACHE - #define vswap_address_available() (!list_empty(&vswap_address_free_head)) #define vswap_info_struct(p) (p == &swap_info[COMP_CACHE_SWP_TYPE]) #define vswap_address(entry) (SWP_TYPE(entry) == COMP_CACHE_SWP_TYPE) --- 363,366 ---- *************** *** 376,379 **** --- 382,387 ---- inline void del_swap_cache_page_vswap(struct page *); + void vswap_alloc_and_init(struct vswap_address **, int); + #else *************** *** 397,402 **** /* free.c */ ! inline int comp_cache_free(comp_cache_fragment_t *); ! 
int comp_cache_free_locked(comp_cache_fragment_t *); #ifdef CONFIG_COMP_CACHE --- 405,410 ---- /* free.c */ ! inline void comp_cache_free(comp_cache_fragment_t *); ! void comp_cache_free_locked(comp_cache_fragment_t *); #ifdef CONFIG_COMP_CACHE *************** *** 455,460 **** extern comp_cache_fragment_t ** fragment_hash; ! extern unsigned int fragment_hash_size; ! extern unsigned int fragment_hash_used; extern unsigned int fragment_hash_order; --- 463,468 ---- extern comp_cache_fragment_t ** fragment_hash; ! extern unsigned long fragment_hash_size; ! extern unsigned long fragment_hash_used; extern unsigned int fragment_hash_order; *************** *** 491,495 **** comp_cache_t * search_comp_page_free_space(int); ! comp_cache_fragment_t ** create_fragment_hash(unsigned int *, unsigned int *); extern struct list_head lru_queue; --- 499,503 ---- comp_cache_t * search_comp_page_free_space(int); ! comp_cache_fragment_t ** create_fragment_hash(unsigned long *, unsigned int *); extern struct list_head lru_queue; |
From: Rodrigo S. de C. <rc...@us...> - 2002-01-26 16:50:48
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv16792/mm/comp_cache Modified Files: swapin.c Log Message: - removed an if that could cause a spurious kernel BUG(). It is possible not to find a vswap entry if the pte has been changed in the meanwhile (so the condition may be true while the system remains stable). Index: swapin.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v retrieving revision 1.16 retrieving revision 1.17 diff -C2 -r1.16 -r1.17 *** swapin.c 2002/01/24 22:05:04 1.16 --- swapin.c 2002/01/26 16:50:45 1.17 *************** *** 2,6 **** * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-24 11:39:11 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-26 14:30:29 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 96,104 **** /* it will happen with vswap address only if the vswap address * had a real address assigned */ ! if (!comp_page) { ! if (vswap_address(entry) && !vswap_address[SWP_OFFSET(entry)]->real_entry.val) ! BUG(); goto out; - } /* has the page already been allocated? if it's not, let's --- 96,101 ---- /* it will happen with vswap address only if the vswap address * had a real address assigned */ ! if (!comp_page) goto out; /* has the page already been allocated? if it's not, let's |
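The race the log message describes is easiest to see as a sketch (illustrative only; the wrapper name below is hypothetical, not project code): lookup_comp_cache() may sleep, another thread may rewrite the pte in the meantime, and a missing entry then simply means the fault should be retried.

    static int lookup_with_race_check(pte_t * page_table, pte_t orig_pte, swp_entry_t entry, struct page ** page_out)
    {
            struct page * page = lookup_comp_cache(entry);  /* may sleep */

            /* pte changed under us: not a BUG(), just retry the fault */
            if (!page && !pte_same(*page_table, orig_pte))
                    return 1;

            *page_out = page;       /* may still be NULL: genuinely absent */
            return 0;
    }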
From: Rodrigo S. de C. <rc...@us...> - 2002-01-24 22:05:09
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv7824/mm/comp_cache Modified Files: free.c swapin.c swapout.c vswap.c Log Message: This patch include changes regarding the tlb and cache flushing when setting pte. It also fixes a stupid bug in vswap. - comp_cache_release() split into comp_cache_release() and comp_cache_use_address(). - comp_cache_use_address() now searches for the vma for the pte we are going to change. If found, we flush the cache and tlb for this pte before setting it. If not found, we count this reference for the swap count and when this pte faults in or gets freed, we fix the counters. - so we have to handle the cases in lookup_comp_cache() in which the pte gets changed when we sleep to get a new page. In this case, we check to see if the pte has changed. If it has, wow, it's time to return and waits it to fault in again, now with the real address. - to flush, some code was copied from rmap patch by Rik van Riel. He has the same problem and devised a nice way to decrease complexity when looking for the mm struct. - real_entry field was added back to vswap_address struct - fix a _stupid_ bug in vswap. The estimated_pages is a variable that avoids that we assign a huge number of vswaps which are reserved (vswap which is used but does not have a fragment, so we don't know how bit the fragment will be). This variable was supposed to have negative values, _but_ it was declared as unsigned long. :-( - (in this meanwhile I tried to add a referente to the swap count for our compressed fragment, but it turned out to perform not as well as the old code. I intended to remove that find_comp_page() and comp_cache_free() from comp_cache_release(), but that take less time to execute than some general idea, so I reverted the changes). Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.17 retrieving revision 1.18 diff -C2 -r1.17 -r1.18 *** free.c 2002/01/16 16:30:12 1.17 --- free.c 2002/01/24 22:05:03 1.18 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-01-14 16:13:12 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-01-24 19:49:39 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 20,23 **** --- 20,24 ---- extern void remove_fragment_vswap(comp_cache_fragment_t *); + extern void add_fragment_vswap(comp_cache_fragment_t *); /* is fragment1 the left neighbour of fragment2? */ *************** *** 67,71 **** remove_fragment_vswap(fragment); remove_fragment_from_hash_table(fragment); ! remove_fragment_from_lru_queue(fragment); } --- 68,72 ---- remove_fragment_vswap(fragment); remove_fragment_from_hash_table(fragment); ! remove_fragment_from_lru_queue(fragment); } *************** *** 140,144 **** /* remove the fragment from comp page */ list_del(&(fragment->list)); ! /* careful: that's not only the compressed size from this * fragment, but also the fragments that might have been --- 141,145 ---- /* remove the fragment from comp page */ list_del(&(fragment->list)); ! /* careful: that's not only the compressed size from this * fragment, but also the fragments that might have been *************** *** 180,203 **** } ! ! 
int comp_cache_release(swp_entry_t entry) { comp_cache_t * comp_page; comp_cache_fragment_t * fragment = NULL; - struct vswap_address * vswap; - struct list_head * vswap_lh; - struct pte_list * pte_list, * next_pte_list = NULL; - swp_entry_t old_entry; - /* let's check if there is any compressed page set to this - * entry and free it at once if possible. Otherwise, we will - * set the Freed bit, which will make it possible to be freed - * later */ comp_page = find_comp_page(entry, &fragment); ! if (comp_page) comp_cache_free(fragment); /* no virtual swap entry with a compressed page */ if (list_empty(&vswap_address_used_head)) --- 181,211 ---- } ! inline void comp_cache_release(swp_entry_t entry) { comp_cache_t * comp_page; comp_cache_fragment_t * fragment = NULL; comp_page = find_comp_page(entry, &fragment); ! if (comp_page) comp_cache_free(fragment); + } + + extern atomic_t estimated_pages; + int + comp_cache_use_address(swp_entry_t entry) + { + comp_cache_fragment_t * fragment = NULL; + struct mm_struct * mm; + unsigned long address; + struct vm_area_struct * vma; + pte_t pte; + struct vswap_address * vswap; + struct list_head * vswap_lh; + struct pte_list * pte_list, * next_pte_list = NULL; + swp_entry_t old_entry; + /* no virtual swap entry with a compressed page */ if (list_empty(&vswap_address_used_head)) *************** *** 237,266 **** return 0; - /* remove from used list, since we won't have a virtual - * addresses compressed page any longer */ - list_del_init(vswap_lh); - fragment = vswap->fragment; - comp_page = fragment->comp_page; remove_fragment_from_hash_table(fragment); - fragment->index = entry.val; - - estimated_free_space += fragment->compressed_size; - - add_fragment_to_hash_table(fragment); - add_fragment_to_lru_queue(fragment); - - vswap->fragment = VSWAP_RESERVED; - - /* old_virtual_addressed_pte <- new real swap entry */ - pte_list = vswap->pte_list; - /* let's fix swap cache page address (if any) */ if (vswap->swap_cache_page) { ! struct page * swap_cache_page; ! ! swap_cache_page = vswap->swap_cache_page; if (!PageLocked(swap_cache_page)) --- 245,256 ---- return 0; fragment = vswap->fragment; + remove_fragment_vswap(fragment); remove_fragment_from_hash_table(fragment); /* let's fix swap cache page address (if any) */ if (vswap->swap_cache_page) { ! struct page * swap_cache_page = vswap->swap_cache_page; if (!PageLocked(swap_cache_page)) *************** *** 268,316 **** page_cache_get(swap_cache_page); - comp_cache_swp_duplicate(old_entry); delete_from_swap_cache(swap_cache_page); - comp_cache_swp_free_generic(old_entry, 0); - - //swap_duplicate(entry); if (add_to_swap_cache(swap_cache_page, entry)) BUG(); ! page_cache_release(swap_cache_page); UnlockPage(swap_cache_page); } while (pte_list) { next_pte_list = pte_list->next; - - remove_pte_vswap(pte_list->ptep); ! ptep_get_and_clear(pte_list->ptep); ! set_pte(pte_list->ptep, swp_entry_to_pte(entry)); swap_duplicate(entry); ! comp_cache_swp_free_generic(old_entry, 0); ! pte_list = next_pte_list; ! } ! /* the virtual swap entry is supposed to be freed now (ie, ! * pte_list = NULL, swap_cache_page = NULL and added back to ! * free list) */ ! if (vswap->pte_list) ! BUG(); ! if (vswap->swap_cache_page) ! BUG(); ! if (vswap->count) { ! swp_entry_t entry = SWP_ENTRY(31,vswap->offset); ! ! printk("entry: %08lx\n", entry.val); ! BUG(); } ! 
UnlockPage(comp_page->page); return 1; } --- 258,310 ---- page_cache_get(swap_cache_page); + comp_cache_swp_duplicate(old_entry); delete_from_swap_cache(swap_cache_page); if (add_to_swap_cache(swap_cache_page, entry)) BUG(); ! ! comp_cache_swp_free_generic(old_entry, 0); page_cache_release(swap_cache_page); UnlockPage(swap_cache_page); } + /* old_virtual_addressed_pte <- new real swap entry */ + pte_list = vswap->pte_list; + while (pte_list) { next_pte_list = pte_list->next; ! mm = ptep_to_mm(pte_list->ptep); ! address = ptep_to_address(pte_list->ptep); ! vma = find_vma(mm, address); ! swap_duplicate(entry); + + if (!vma) + goto next; + + remove_pte_vswap(pte_list->ptep); ! pte = ptep_get_and_clear(pte_list->ptep); ! flush_tlb_page(vma, address); ! flush_cache_page(vma, address); ! set_pte(pte_list->ptep, swp_entry_to_pte(entry)); ! comp_cache_swp_free_generic(old_entry, 0); ! next: ! pte_list = next_pte_list; } ! fragment->index = entry.val; ! add_fragment_to_lru_queue(fragment); + if (vswap->pte_list) + vswap->real_entry.val = entry.val; + + add_fragment_to_hash_table(fragment); + UnlockPage(fragment->comp_page->page); return 1; } Index: swapin.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v retrieving revision 1.15 retrieving revision 1.16 diff -C2 -r1.15 -r1.16 *** swapin.c 2002/01/16 16:30:12 1.15 --- swapin.c 2002/01/24 22:05:04 1.16 *************** *** 2,6 **** * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-14 16:59:13 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-24 11:39:11 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 85,101 **** comp_cache_fragment_t * fragment = NULL; ! try_swap_cache: /* it might have been compressed but not yet freed */ swap_cache_page = lookup_swap_cache(entry); comp_page = find_comp_page(entry, &fragment); ! /* ok, found in swap cache */ if (swap_cache_page) goto found_swap_cache; ! if (!comp_page) { ! if (vswap_address(entry)) BUG(); ! return NULL; } --- 85,103 ---- comp_cache_fragment_t * fragment = NULL; ! try_swap_cache: /* it might have been compressed but not yet freed */ swap_cache_page = lookup_swap_cache(entry); comp_page = find_comp_page(entry, &fragment); ! /* ok, found in swap cache */ if (swap_cache_page) goto found_swap_cache; ! ! /* it will happen with vswap address only if the vswap address ! * had a real address assigned */ if (!comp_page) { ! if (vswap_address(entry) && !vswap_address[SWP_OFFSET(entry)]->real_entry.val) BUG(); ! goto out; } *************** *** 124,128 **** UnlockPage(page); - return page; --- 126,129 ---- *************** *** 142,145 **** --- 143,147 ---- } + out: if (page) page_cache_release(page); Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.18 retrieving revision 1.19 diff -C2 -r1.18 -r1.19 *** swapout.c 2002/01/16 16:30:12 1.18 --- swapout.c 2002/01/24 22:05:04 1.19 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-14 15:39:40 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-22 11:34:23 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 195,199 **** * function below */ swap_duplicate(entry); ! 
swp_buffer = decompress_to_swp_buffer(fragment); --- 195,199 ---- * function below */ swap_duplicate(entry); ! swp_buffer = decompress_to_swp_buffer(fragment); *************** *** 201,205 **** if (!swp_buffer) goto out; ! rw_swap_page(WRITE, swp_buffer->page); --- 201,205 ---- if (!swp_buffer) goto out; ! rw_swap_page(WRITE, swp_buffer->page); *************** *** 339,343 **** fragment->flags = 0; fragment->comp_page = comp_page; ! /* let's update some important fields */ comp_page->free_space -= compressed_size; --- 339,343 ---- fragment->flags = 0; fragment->comp_page = comp_page; ! /* let's update some important fields */ comp_page->free_space -= compressed_size; Index: vswap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v retrieving revision 1.17 retrieving revision 1.18 diff -C2 -r1.17 -r1.18 *** vswap.c 2002/01/16 16:30:12 1.17 --- vswap.c 2002/01/24 22:05:04 1.18 *************** *** 2,6 **** * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-01-14 16:58:59 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-01-24 19:27:40 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 26,30 **** unsigned short * last_page_size; unsigned short last_page = 0; ! unsigned int estimated_pages; static kmem_cache_t * pte_cachep; --- 26,30 ---- unsigned short * last_page_size; unsigned short last_page = 0; ! long estimated_pages; static kmem_cache_t * pte_cachep; *************** *** 50,59 **** int comp_cache_available_space(void) { ! int mean_size; ! mean_size = (unsigned short) (estimated_free_space/real_num_comp_pages); ! if (mean_size < 0 || mean_size > PAGE_SIZE) { ! printk("mean_size: %d\n", mean_size); BUG(); } --- 50,59 ---- int comp_cache_available_space(void) { ! int available_mean_size; ! available_mean_size = (unsigned short) (estimated_free_space/real_num_comp_pages); ! if (available_mean_size < 0 || available_mean_size > PAGE_SIZE) { ! printk("available_mean_size: %d\n", available_mean_size); BUG(); } *************** *** 61,66 **** if (!vswap_address_available()) return 0; ! ! if (mean_size < comp_cache_mean_size()) return 0; --- 61,68 ---- if (!vswap_address_available()) return 0; ! ! /* it the available mean size in the comp cache bigger than ! * the mean size of the last pages? if it's not, return 0 */ ! if (available_mean_size < comp_cache_mean_size()) return 0; *************** *** 84,89 **** return entry; - estimated_pages--; - vswap = list_entry(vswap_address_free_head.next, struct vswap_address, list); list_del_init(vswap_address_free_head.next); --- 86,89 ---- *************** *** 99,106 **** vswap_address[offset]->count = count; - vswap_address[offset]->fragment = VSWAP_RESERVED; vswap_address[offset]->pte_list = NULL; ! vswap_address[offset]->swap_cache_page = NULL; entry = SWP_ENTRY(type, offset); --- 99,109 ---- vswap_address[offset]->count = count; vswap_address[offset]->pte_list = NULL; ! vswap_address[offset]->swap_cache_page = NULL; ! vswap_address[offset]->real_entry.val = 0; + vswap_address[offset]->fragment = VSWAP_RESERVED; + estimated_pages--; + entry = SWP_ENTRY(type, offset); *************** *** 123,126 **** --- 126,132 ---- BUG(); + if (vswap_address[offset]->real_entry.val) + BUG(); + vswap_address[offset]->count++; } *************** *** 142,154 **** BUG(); if (--vswap->count) return vswap->count; ! /* do we have a compressed page for this virtual entry? in the ! 
* case we do, let's free the entry. */ ! if (reserved(offset)) { ! estimated_pages++; goto out; - } if (!vswap->fragment) --- 148,161 ---- BUG(); + /* a real address has been assigned, but we couldn't set this + * pte, so let's update the swap count for the real address */ + if (vswap->real_entry.val) + swap_free(vswap->real_entry); + if (--vswap->count) return vswap->count; ! if (reserved(offset)) goto out; if (!vswap->fragment) *************** *** 166,169 **** --- 173,178 ---- out: + estimated_pages++; + /* remove from the used virtual swap entries list. This list * contains only virtual swap addresses which have compressed *************** *** 181,184 **** --- 190,194 ---- vswap->pte_list = NULL; vswap->swap_cache_page = NULL; + vswap->real_entry.val = 0; if (!list_empty(&vswap->list)) *************** *** 220,223 **** --- 230,234 ---- vswap_address[offset]->fragment = VSWAP_RESERVED; + estimated_pages--; /* remove the virtual swap entry from the used list, since *************** *** 227,231 **** estimated_free_space += fragment->compressed_size; - estimated_pages--; } --- 238,241 ---- *************** *** 244,250 **** offset = SWP_OFFSET(entry); ! if (reserved(offset)) ! estimated_pages++; ! vswap_address[offset]->fragment = fragment; --- 254,265 ---- offset = SWP_OFFSET(entry); ! if (!reserved(offset)) ! BUG(); ! ! estimated_pages++; ! ! if (estimated_pages > NUM_VSWAP_ENTRIES) ! BUG(); ! vswap_address[offset]->fragment = fragment; *************** *** 332,336 **** entry.val = page->index; ! if (!vswap_address(entry)) return; --- 347,351 ---- entry.val = page->index; ! if (!vswap_address(entry)) return; *************** *** 381,384 **** --- 396,400 ---- vswap_address[i]->swap_cache_page = NULL; vswap_address[i]->fragment = NULL; + vswap_address[i]->real_entry.val = 0; list_add(&(vswap_address[i]->list), &vswap_address_free_head); *************** *** 386,390 **** estimated_free_space = PAGE_SIZE * real_num_comp_pages; ! estimated_pages = (1.5 * real_num_comp_pages); last_page_size = (unsigned short *) vmalloc(NUM_MEAN_PAGES * sizeof(unsigned short)); --- 402,406 ---- estimated_free_space = PAGE_SIZE * real_num_comp_pages; ! estimated_pages = 1.5 * real_num_comp_pages; last_page_size = (unsigned short *) vmalloc(NUM_MEAN_PAGES * sizeof(unsigned short)); |
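Condensed from the free.c hunk above, the core of comp_cache_use_address() reads roughly as follows (a sketch of the checked-in loop, declarations omitted): each pte still holding the virtual entry is resolved to its mm and address through the rmap-style helpers, flushed, and only then rewritten with the real swap entry; ptes whose vma cannot be found keep the extra swap reference and are fixed up at fault time.

    pte_list = vswap->pte_list;
    while (pte_list) {
            next_pte_list = pte_list->next;

            mm = ptep_to_mm(pte_list->ptep);
            address = ptep_to_address(pte_list->ptep);
            vma = find_vma(mm, address);

            swap_duplicate(entry);          /* the real entry gains a reference */

            if (!vma)                       /* no vma: fix the counters at fault time */
                    goto next;

            remove_pte_vswap(pte_list->ptep);
            ptep_get_and_clear(pte_list->ptep);
            flush_tlb_page(vma, address);
            flush_cache_page(vma, address);
            set_pte(pte_list->ptep, swp_entry_to_pte(entry));

            comp_cache_swp_free_generic(old_entry, 0);      /* drop the vswap reference */
    next:
            pte_list = next_pte_list;
    }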
From: Rodrigo S. de C. <rc...@us...> - 2002-01-24 22:05:07
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv7824/mm Modified Files: memory.c swapfile.c Log Message: This patch include changes regarding the tlb and cache flushing when setting pte. It also fixes a stupid bug in vswap. - comp_cache_release() split into comp_cache_release() and comp_cache_use_address(). - comp_cache_use_address() now searches for the vma for the pte we are going to change. If found, we flush the cache and tlb for this pte before setting it. If not found, we count this reference for the swap count and when this pte faults in or gets freed, we fix the counters. - so we have to handle the cases in lookup_comp_cache() in which the pte gets changed when we sleep to get a new page. In this case, we check to see if the pte has changed. If it has, wow, it's time to return and waits it to fault in again, now with the real address. - to flush, some code was copied from rmap patch by Rik van Riel. He has the same problem and devised a nice way to decrease complexity when looking for the mm struct. - real_entry field was added back to vswap_address struct - fix a _stupid_ bug in vswap. The estimated_pages is a variable that avoids that we assign a huge number of vswaps which are reserved (vswap which is used but does not have a fragment, so we don't know how bit the fragment will be). This variable was supposed to have negative values, _but_ it was declared as unsigned long. :-( - (in this meanwhile I tried to add a referente to the swap count for our compressed fragment, but it turned out to perform not as well as the old code. I intended to remove that find_comp_page() and comp_cache_free() from comp_cache_release(), but that take less time to execute than some general idea, so I reverted the changes). Index: memory.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v retrieving revision 1.17 retrieving revision 1.18 diff -C2 -r1.17 -r1.18 *** memory.c 2002/01/14 12:05:08 1.17 --- memory.c 2002/01/24 22:05:03 1.18 *************** *** 103,106 **** --- 103,107 ---- pte = pte_offset(dir, 0); pmd_clear(dir); + pgtable_remove_rmap(pte); pte_free(pte); } *************** *** 1103,1109 **** --- 1104,1138 ---- int ret = 1; + if (vswap_address(entry)) { + unsigned long offset = SWP_OFFSET(entry); + + if (vswap_address[offset]->real_entry.val) { + swp_entry_t old_entry; + + remove_pte_vswap(page_table); + + pte = ptep_get_and_clear(page_table); + flush_tlb_page(vma, address); + flush_cache_page(vma, address); + + swap_duplicate(vswap_address[offset]->real_entry); + set_pte(page_table, swp_entry_to_pte(vswap_address[offset]->real_entry)); + + old_entry.val = entry.val; + entry.val = vswap_address[offset]->real_entry.val; + + comp_cache_swp_free_generic(old_entry, 0); + } + } + + spin_unlock(&mm->page_table_lock); page = lookup_comp_cache(entry); + /* we may sleep in the above function and a vswap addressed + * pte can have a real address assigned in the meanwhile */ + if (!page && !pte_same(*page_table, orig_pte)) + return 1; + if (!page) { swapin_readahead(entry); *************** *** 1436,1439 **** --- 1465,1469 ---- } } + pgtable_add_rmap(new, mm, address); pmd_populate(mm, pmd, new); } Index: swapfile.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/swapfile.c,v retrieving revision 1.22 retrieving revision 1.23 diff -C2 -r1.22 -r1.23 *** swapfile.c 2001/12/21 18:33:11 1.22 --- swapfile.c 2002/01/24 22:05:03 1.23 
*************** *** 199,202 **** --- 199,203 ---- { int count; + swp_entry_t entry; if (vswap_info_struct(p)) *************** *** 206,219 **** if (count < SWAP_MAP_MAX) { count--; - p->swap_map[offset] = count; if (!count) { #ifdef CONFIG_COMP_SWAP p->swap_comp[offset] = 0; #endif ! /* let's keep the swap_map[offset] used for ! * the case the comp_cache_release() calls ! * swap_dup() */ ! p->swap_map[offset]++; ! if (!comp_cache_release(SWP_ENTRY(p - swap_info, offset))) { if (offset < p->lowest_bit) p->lowest_bit = offset; --- 207,217 ---- if (count < SWAP_MAP_MAX) { count--; if (!count) { #ifdef CONFIG_COMP_SWAP p->swap_comp[offset] = 0; #endif ! entry = SWP_ENTRY(p - swap_info, offset); ! comp_cache_release(entry); ! if (!comp_cache_use_address(entry)) { if (offset < p->lowest_bit) p->lowest_bit = offset; *************** *** 222,227 **** nr_swap_pages++; } ! p->swap_map[offset]--; } } return count; --- 220,227 ---- nr_swap_pages++; } ! count = p->swap_map[offset]; ! count--; } + p->swap_map[offset] = count; } return count; |
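The estimated_pages bug mentioned in the log is worth a standalone illustration: a counter that must go negative was declared unsigned long, so it wraps to a huge positive value instead. A self-contained user-space demo (not project code):

    #include <stdio.h>

    int main(void)
    {
            unsigned long estimated_pages = 0;

            estimated_pages--;      /* intended -1; wraps to ULONG_MAX */

            printf("as unsigned : %lu\n", estimated_pages);
            printf("negative?   : %s\n", (long) estimated_pages < 0 ? "yes" : "no");
            return 0;
    }

Declaring the variable as plain long, as the patch does, lets comparisons such as the estimated_pages > NUM_VSWAP_ENTRIES sanity check behave as intended.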
From: Rodrigo S. de C. <rc...@us...> - 2002-01-24 22:05:07
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv7824/include/linux Modified Files: comp_cache.h Log Message: This patch include changes regarding the tlb and cache flushing when setting pte. It also fixes a stupid bug in vswap. - comp_cache_release() split into comp_cache_release() and comp_cache_use_address(). - comp_cache_use_address() now searches for the vma for the pte we are going to change. If found, we flush the cache and tlb for this pte before setting it. If not found, we count this reference for the swap count and when this pte faults in or gets freed, we fix the counters. - so we have to handle the cases in lookup_comp_cache() in which the pte gets changed when we sleep to get a new page. In this case, we check to see if the pte has changed. If it has, wow, it's time to return and waits it to fault in again, now with the real address. - to flush, some code was copied from rmap patch by Rik van Riel. He has the same problem and devised a nice way to decrease complexity when looking for the mm struct. - real_entry field was added back to vswap_address struct - fix a _stupid_ bug in vswap. The estimated_pages is a variable that avoids that we assign a huge number of vswaps which are reserved (vswap which is used but does not have a fragment, so we don't know how bit the fragment will be). This variable was supposed to have negative values, _but_ it was declared as unsigned long. :-( - (in this meanwhile I tried to add a referente to the swap count for our compressed fragment, but it turned out to perform not as well as the old code. I intended to remove that find_comp_page() and comp_cache_free() from comp_cache_release(), but that take less time to execute than some general idea, so I reverted the changes). Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.42 retrieving revision 1.43 diff -C2 -r1.42 -r1.43 *** comp_cache.h 2002/01/16 16:30:11 1.42 --- comp_cache.h 2002/01/24 22:05:03 1.43 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-14 16:42:11 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-24 18:59:40 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 28,32 **** #include <linux/WKcommon.h> ! #define COMP_CACHE_VERSION "0.21pre7" /* maximum compressed size of a page */ --- 28,32 ---- #include <linux/WKcommon.h> ! #define COMP_CACHE_VERSION "0.21" /* maximum compressed size of a page */ *************** *** 87,106 **** /* adaptivity.c */ #ifdef CONFIG_COMP_CACHE - int shrink_comp_cache(comp_cache_t *); inline int grow_comp_cache(zone_t *, int); - #else ! ! static inline int shrink_comp_cache(comp_cache_t * comp_page) ! { ! return 0; ! } ! ! static inline int grow_comp_cache(zone_t * zone, int nr_pages) ! { ! return 0; ! } ! #endif --- 87,95 ---- /* adaptivity.c */ #ifdef CONFIG_COMP_CACHE int shrink_comp_cache(comp_cache_t *); inline int grow_comp_cache(zone_t *, int); #else ! static inline int shrink_comp_cache(comp_cache_t * comp_page) { return 0; } ! static inline int grow_comp_cache(zone_t * zone, int nr_pages) { return 0; } #endif *************** *** 340,347 **** #else extern int swap_writepage(struct page*); ! static inline int compress_page(struct page * page) ! { ! return swap_writepage(page); ! 
} static inline void comp_cache_init(void) {}; #endif --- 329,333 ---- #else extern int swap_writepage(struct page*); ! static inline int compress_page(struct page * page) { return swap_writepage(page); } static inline void comp_cache_init(void) {}; #endif *************** *** 355,358 **** --- 341,345 ---- comp_cache_fragment_t * fragment; + swp_entry_t real_entry; struct page * swap_cache_page; *************** *** 398,423 **** static inline void comp_cache_swp_duplicate(swp_entry_t entry) {}; ! static inline int comp_cache_swp_free_generic(swp_entry_t entry, int free_fragment) ! { ! return 0; ! } ! static inline int comp_cache_swp_count(swp_entry_t entry) ! { ! return 0; ! } ! static inline int comp_cache_available_space(void) ! { ! return 0; ! } ! static inline swp_entry_t get_virtual_swap_page(struct page * page, unsigned short count) ! { ! return ((swp_entry_t) { 0 }); ! } static inline void add_pte_vswap(pte_t * ptep, swp_entry_t entry) {}; static inline void remove_pte_vswap(pte_t * ptep) {}; static inline void add_swap_cache_page_vswap(struct page * page, swp_entry_t entry) {}; static inline void del_swap_cache_page_vswap(struct page * page) {}; - #endif --- 385,397 ---- static inline void comp_cache_swp_duplicate(swp_entry_t entry) {}; ! static inline int comp_cache_swp_free_generic(swp_entry_t entry, int free_fragment) { return 0; } ! static inline int comp_cache_swp_count(swp_entry_t entry) { return 0; } ! static inline int comp_cache_available_space(void) { return 0; } ! static inline swp_entry_t get_virtual_swap_page(struct page * page, unsigned short count) { return ((swp_entry_t) { 0 }); } static inline void add_pte_vswap(pte_t * ptep, swp_entry_t entry) {}; static inline void remove_pte_vswap(pte_t * ptep) {}; static inline void add_swap_cache_page_vswap(struct page * page, swp_entry_t entry) {}; static inline void del_swap_cache_page_vswap(struct page * page) {}; #endif *************** *** 426,434 **** int comp_cache_free_locked(comp_cache_fragment_t *); - #ifdef CONFIG_COMP_CACHE ! int comp_cache_release(swp_entry_t); #else ! static inline int comp_cache_release(swp_entry_t entry) { return 0; } #endif --- 400,448 ---- int comp_cache_free_locked(comp_cache_fragment_t *); #ifdef CONFIG_COMP_CACHE ! inline void comp_cache_release(swp_entry_t); ! int comp_cache_use_address(swp_entry_t); ! ! /* from Riel's rmap patch */ ! static inline void pgtable_add_rmap(pte_t * ptep, struct mm_struct * mm, unsigned long address) ! { ! struct page * page = virt_to_page(ptep); ! ! page->mapping = (void *)mm; ! page->index = address & ~((PTRS_PER_PTE * PAGE_SIZE) - 1); ! } ! ! static inline void pgtable_remove_rmap(pte_t * ptep) ! { ! struct page * page = virt_to_page(ptep); ! ! page->mapping = NULL; ! page->index = 0; ! } ! ! static inline struct mm_struct * ptep_to_mm(pte_t * ptep) ! { ! struct page * page = virt_to_page(ptep); ! ! return (struct mm_struct *) page->mapping; ! } ! ! static inline unsigned long ptep_to_address(pte_t * ptep) ! { ! struct page * page = virt_to_page(ptep); ! unsigned long low_bits; ! ! low_bits = ((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE; ! return page->index + low_bits; ! } ! ! #else ! static inline int comp_cache_use_address(swp_entry_t entry) { return 0; } ! static inline void comp_cache_release(swp_entry_t entry) { }; ! static inline void pgtable_add_rmap(pte_t * ptep, struct mm_struct * mm, unsigned long address) { } ! static inline void pgtable_remove_rmap(pte_t * ptep) { } ! static inline struct mm_struct * ptep_to_mm(pte_t * ptep) { return 0; } ! 
static inline unsigned long ptep_to_address(pte_t * ptep) { return 0; } #endif *************** *** 477,488 **** inline int comp_cache_free_space(void); #else ! ! static inline int comp_cache_free_space(void) ! { ! return 0; ! } ! #endif - /* proc.c */ --- 491,496 ---- inline int comp_cache_free_space(void); #else ! static inline int comp_cache_free_space(void) { return 0; } #endif /* proc.c */ |
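The rmap-derived helpers in the hunk above repay a worked example. pgtable_add_rmap() stores the base virtual address mapped by a page-table page in that page's index field, and ptep_to_address() recovers the full address because (k / sizeof(pte_t)) * PAGE_SIZE equals k * PTRS_PER_PTE whenever PAGE_SIZE / sizeof(pte_t) == PTRS_PER_PTE, which holds on i386 (4096 / 4 == 1024). A standalone check of that arithmetic, with an arbitrary pte byte offset:

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size = 4096, ptrs_per_pte = 1024, pte_size = 4;
            unsigned long k = 0x2a8;        /* byte offset of a pte within its page */

            printf("entry index    : %lu\n", k / pte_size);
            printf("offset, direct : %lu\n", (k / pte_size) * page_size);
            printf("offset, rmap   : %lu\n", k * ptrs_per_pte);   /* same value */
            return 0;
    }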
From: Rodrigo S. de C. <rc...@us...> - 2002-01-17 19:24:36
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv28815/mm/comp_cache Modified Files: aux.c Log Message: Fix that makes the kernel boot parameter for the compressed cache size work again. This bug would also make the hash table scale with CONFIG_COMP_CACHE_SIZE regardless of the number the user might have entered there (1 million, for instance). Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.13 retrieving revision 1.14 diff -C2 -r1.13 -r1.14 *** aux.c 2002/01/16 16:30:12 1.13 --- aux.c 2002/01/17 19:24:32 1.14 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-01-16 13:10:07 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-01-17 16:59:52 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 346,350 **** /* fragment hash table */ ! fragment_hash_size = 3* CONFIG_COMP_CACHE_SIZE * sizeof(comp_cache_fragment_t *); for (order = 0; (PAGE_SIZE << order) < fragment_hash_size; order++); --- 346,350 ---- /* fragment hash table */ ! fragment_hash_size = 3 * max_num_comp_pages * sizeof(comp_cache_fragment_t *); for (order = 0; (PAGE_SIZE << order) < fragment_hash_size; order++); |
From: Rodrigo S. de C. <rc...@us...> - 2002-01-16 16:30:17
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv2268/include/linux Modified Files: comp_cache.h Log Message: Simple changes regarding hash tables and some cleanups. - no need to enter a power of 2 number for the maximum number of compressed pages any longer. It's not increasing the hash table size in a dynamic way, but that will be done soon. - hash table initialization was changed to allocate contiguous pages for it, what increases a lot related functions' performance. - swapin_vswap() has been deleted. Not needed. - comp_cache_free() was renamed to comp_cache_free_locked() and the page is passed as paremeter has to be locked. comp_cache_free() function was created to handle cases where the page is not yet locked. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.41 retrieving revision 1.42 diff -C2 -r1.41 -r1.42 *** comp_cache.h 2002/01/14 12:05:08 1.41 --- comp_cache.h 2002/01/16 16:30:11 1.42 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-14 08:49:45 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-14 16:42:11 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 28,31 **** --- 28,33 ---- #include <linux/WKcommon.h> + #define COMP_CACHE_VERSION "0.21pre7" + /* maximum compressed size of a page */ #define MAX_COMPRESSED_SIZE 4500 *************** *** 421,425 **** /* free.c */ ! inline void comp_cache_free(comp_cache_fragment_t *); #ifdef CONFIG_COMP_CACHE --- 423,429 ---- /* free.c */ ! inline int comp_cache_free(comp_cache_fragment_t *); ! int comp_cache_free_locked(comp_cache_fragment_t *); ! #ifdef CONFIG_COMP_CACHE *************** *** 435,447 **** comp_cache_t * find_and_lock_comp_page(swp_entry_t, comp_cache_fragment_t **); inline void check_all_fragments(comp_cache_t *); ! #define FRAGMENT_HASH_SIZE (CONFIG_COMP_CACHE_SIZE/8) ! #define fragment_hashfn(entry) ((SWP_OFFSET(entry) >> 2) & (FRAGMENT_HASH_SIZE - 1)) inline void add_fragment_to_hash_table(comp_cache_fragment_t *); inline void remove_fragment_from_hash_table(comp_cache_fragment_t *); - #define FREE_SPACE_INTERVAL 100 - #define FREE_SPACE_HASH_SIZE ((int) (PAGE_SIZE/FREE_SPACE_INTERVAL) + 2) static inline int free_space_hashfn(int free_space) { --- 439,456 ---- comp_cache_t * find_and_lock_comp_page(swp_entry_t, comp_cache_fragment_t **); inline void check_all_fragments(comp_cache_t *); + + extern unsigned int fragment_hash_size; ! static inline int fragment_hashfn(swp_entry_t entry) ! { ! return ((SWP_OFFSET(entry) >> 2) & (fragment_hash_size - 1)); ! } inline void add_fragment_to_hash_table(comp_cache_fragment_t *); inline void remove_fragment_from_hash_table(comp_cache_fragment_t *); + + extern unsigned int free_space_hash_size; + extern unsigned int free_space_interval; static inline int free_space_hashfn(int free_space) { *************** *** 449,455 **** return 0; ! free_space -= (free_space % FREE_SPACE_INTERVAL); ! return (free_space/FREE_SPACE_INTERVAL + 1); } --- 458,464 ---- return 0; ! free_space -= (free_space % free_space_interval); ! return (free_space/free_space_interval + 1); } |
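The new fragment_hashfn() masks with (fragment_hash_size - 1), which is a cheap modulo only because the init code (see the mm/comp_cache message below) rounds the table size to a power of two via htable_bits. A standalone illustration, with an assumed size:

    #include <stdio.h>

    int main(void)
    {
            unsigned int fragment_hash_size = 1 << 13;      /* power of two */
            unsigned long offset = 123456789UL;             /* stand-in for SWP_OFFSET(entry) */

            printf("mask   : %lu\n", (offset >> 2) & (fragment_hash_size - 1));
            printf("modulo : %lu\n", (offset >> 2) % fragment_hash_size);  /* identical */
            return 0;
    }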
From: Rodrigo S. de C. <rc...@us...> - 2002-01-16 16:30:17
Update of /cvsroot/linuxcompressed/linux/Documentation In directory usw-pr-cvs1:/tmp/cvs-serv2268/Documentation Modified Files: Configure.help Log Message: Simple changes regarding hash tables and some cleanups. - no need to enter a power-of-2 number for the maximum number of compressed pages any longer. The hash table size is not grown dynamically yet, but that will be done soon. - hash table initialization was changed to allocate contiguous pages for it, which greatly improves the performance of the related functions. - swapin_vswap() has been deleted; it is not needed. - comp_cache_free() was renamed to comp_cache_free_locked(), and the page passed as a parameter has to be locked. A new comp_cache_free() function was created to handle cases where the page is not yet locked. Index: Configure.help =================================================================== RCS file: /cvsroot/linuxcompressed/linux/Documentation/Configure.help,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -r1.2 -r1.3 *** Configure.help 2002/01/04 22:24:06 1.2 --- Configure.help 2002/01/16 16:30:11 1.3 *************** *** 400,405 **** Here you choose the maximum number of memory pages used by the Compressed Cache. If the number is greater than half of memory size, ! it will set to 512, the default value. The number must be a power of ! two. The maximum value will be not necessarily used and can be configured --- 400,404 ---- Here you choose the maximum number of memory pages used by the Compressed Cache. If the number is greater than half of memory size, ! it will set to 512, the default value. The maximum value will be not necessarily used and can be configured |
From: Rodrigo S. de C. <rc...@us...> - 2002-01-16 16:30:17
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv2268/mm/comp_cache Modified Files: aux.c free.c main.c swapin.c swapout.c vswap.c Log Message: Simple changes regarding hash tables and some cleanups. - no need to enter a power of 2 number for the maximum number of compressed pages any longer. It's not increasing the hash table size in a dynamic way, but that will be done soon. - hash table initialization was changed to allocate contiguous pages for it, what increases a lot related functions' performance. - swapin_vswap() has been deleted. Not needed. - comp_cache_free() was renamed to comp_cache_free_locked() and the page is passed as paremeter has to be locked. comp_cache_free() function was created to handle cases where the page is not yet locked. Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.12 retrieving revision 1.13 diff -C2 -r1.12 -r1.13 *** aux.c 2002/01/14 12:05:08 1.12 --- aux.c 2002/01/16 16:30:12 1.13 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-01-13 16:16:07 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-01-16 13:10:07 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 13,21 **** #include <linux/pagemap.h> #include <linux/init.h> ! comp_cache_fragment_t * fragment_hash[FRAGMENT_HASH_SIZE]; ! comp_cache_t * free_space_hash[FREE_SPACE_HASH_SIZE]; /* computes (unsigned long long x) / (unsigned long long y) */ unsigned long long --- 13,26 ---- #include <linux/pagemap.h> #include <linux/init.h> + #include <linux/vmalloc.h> ! comp_cache_fragment_t ** fragment_hash; ! unsigned int fragment_hash_size; + comp_cache_t ** free_space_hash; + unsigned int free_space_hash_size; + unsigned int free_space_interval; + /* computes (unsigned long long x) / (unsigned long long y) */ unsigned long long *************** *** 119,123 **** idx = free_space_hashfn(free_space); ! if (idx == FREE_SPACE_HASH_SIZE) goto check_exact_size; --- 124,128 ---- idx = free_space_hashfn(free_space); ! if (idx == free_space_hash_size) goto check_exact_size; *************** *** 127,131 **** do { comp_page = free_space_hash[i++]; ! } while(i < FREE_SPACE_HASH_SIZE && !comp_page); /* couldn't find a page? let's check the pages whose free --- 132,136 ---- do { comp_page = free_space_hash[i++]; ! } while(i < free_space_hash_size && !comp_page); /* couldn't find a page? let's check the pages whose free *************** *** 179,193 **** { comp_cache_fragment_t * fragment; *fragment_out = NULL; ! for (fragment = fragment_hash[fragment_hashfn(entry)]; fragment != NULL; fragment = fragment->next_hash) { if (fragment->index == entry.val) { *fragment_out = fragment; ! return (fragment->comp_page); } } ! ! return NULL; } --- 184,211 ---- { comp_cache_fragment_t * fragment; + comp_cache_t * comp_page; + comp_page = NULL; *fragment_out = NULL; + + fragment = fragment_hash[fragment_hashfn(entry)]; ! goto inside; ! ! for (;;) { ! fragment = fragment->next_hash; ! inside: ! if (!fragment) ! goto not_found; ! if (fragment->index == entry.val) { *fragment_out = fragment; ! comp_page = fragment->comp_page; ! break; } } ! ! not_found: ! return comp_page; } *************** *** 323,335 **** comp_cache_hash_init(void) { ! int i; ! /* inits fragment hash table */ ! for (i = 0; i < FRAGMENT_HASH_SIZE; i++) ! 
fragment_hash[i] = NULL; /* inits comp cache free space hash table */ ! for (i = 0; i < FREE_SPACE_HASH_SIZE; i++) ! free_space_hash[i] = NULL; } --- 341,383 ---- comp_cache_hash_init(void) { ! unsigned long htable_bits, order; ! ! /* (code heavily based on page_cache_init():filemap.c */ ! ! /* fragment hash table */ ! fragment_hash_size = 3* CONFIG_COMP_CACHE_SIZE * sizeof(comp_cache_fragment_t *); ! for (order = 0; (PAGE_SIZE << order) < fragment_hash_size; order++); ! ! do { ! unsigned long tmp = (PAGE_SIZE << order)/sizeof(comp_cache_fragment_t *); ! ! htable_bits = 0; ! while((tmp >>= 1UL) != 0UL) ! htable_bits++; ! ! fragment_hash = (comp_cache_fragment_t **) __get_free_pages(GFP_ATOMIC, order); ! } while(fragment_hash == NULL && --order > 0); ! ! fragment_hash_size = 1 << htable_bits; ! ! printk("Compressed Cache: fragment hash table - %u = %luB\n", fragment_hash_size, (PAGE_SIZE << order)); ! ! if (!fragment_hash) ! panic("comp_cache_hash_init(): couldn't allocate fragment hash table\n"); ! memset((void *) fragment_hash, 0, fragment_hash_size * sizeof(comp_cache_fragment_t *)); /* inits comp cache free space hash table */ ! free_space_interval = 100 * ((float) PAGE_SIZE)/4096; ! free_space_hash_size = (int) (PAGE_SIZE/free_space_interval) + 2; ! ! free_space_hash = vmalloc(free_space_hash_size * sizeof(comp_cache_t *)); ! ! printk("Compressed Cache: free space hash table - %u = %uB\n", free_space_hash_size, free_space_hash_size * sizeof(comp_cache_t *)); ! ! if (!free_space_hash) ! panic("comp_cache_hash_init(): couldn't allocate free space hash table\n"); ! ! memset((void *) free_space_hash, 0, free_space_hash_size * sizeof(comp_cache_t *)); } Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.16 retrieving revision 1.17 diff -C2 -r1.16 -r1.17 *** free.c 2002/01/14 12:05:08 1.16 --- free.c 2002/01/16 16:30:12 1.17 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-01-13 19:00:05 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-01-14 16:13:12 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 70,75 **** } ! void ! comp_cache_free(comp_cache_fragment_t * fragment) { comp_cache_t * comp_page; --- 70,75 ---- } ! int ! comp_cache_free_locked(comp_cache_fragment_t * fragment) { comp_cache_t * comp_page; *************** *** 88,94 **** BUG(); - if (TryLockPage(comp_page->page)) - BUG(); - /* remove from the free space hash table to update it */ remove_comp_page_from_hash_table(comp_page); --- 88,91 ---- *************** *** 158,167 **** /* steal the page if we need to shrink the comp cache */ if (shrink_comp_cache(comp_page)) ! return; - add_comp_page_to_hash_table(comp_page); ! UnlockPage(comp_page->page); } int --- 155,183 ---- /* steal the page if we need to shrink the comp cache */ if (shrink_comp_cache(comp_page)) ! return 0; add_comp_page_to_hash_table(comp_page); ! ! return 1; ! } ! ! inline int ! comp_cache_free(comp_cache_fragment_t * fragment) { ! int retval = 0; ! ! if (!fragment) ! BUG(); ! ! if (TryLockPage(fragment->comp_page->page)) ! BUG(); ! ! retval = comp_cache_free_locked(fragment); ! ! if (retval) ! UnlockPage(fragment->comp_page->page); ! ! 
return retval; } + int Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.18 retrieving revision 1.19 diff -C2 -r1.18 -r1.19 *** main.c 2002/01/14 12:05:08 1.18 --- main.c 2002/01/16 16:30:12 1.19 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-11 18:55:28 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-14 16:46:46 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 181,185 **** min_num_comp_pages = 0; ! printk("Starting compressed cache v0.21pre6 (%lu pages = %luk)\n", max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* initialize our data for the `test' compressed_page */ --- 181,185 ---- min_num_comp_pages = 0; ! printk("Compressed Cache: starting %s - %lu pages = %luKiB\n", COMP_CACHE_VERSION, max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* initialize our data for the `test' compressed_page */ Index: swapin.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v retrieving revision 1.14 retrieving revision 1.15 diff -C2 -r1.14 -r1.15 *** swapin.c 2002/01/14 12:05:08 1.14 --- swapin.c 2002/01/16 16:30:12 1.15 *************** *** 2,6 **** * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-14 08:31:09 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-14 16:59:13 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 27,30 **** --- 27,32 ---- if (!PageCompCache(comp_page->page)) BUG(); + if (!PageLocked(comp_page->page)) + BUG(); if (!PageLocked(uncompressed_page)) BUG(); *************** *** 41,46 **** } - extern void swapin_vswap(comp_cache_t *, comp_cache_fragment_t *); - /** * decompress_and_free_fragment - decompress a fragment, freeing its --- 43,46 ---- *************** *** 55,66 **** { comp_cache_t * comp_page = fragment->comp_page; if (!PageLocked(page)) BUG(); decompress_page(fragment, page); ! comp_cache_free(fragment); ! swapin_vswap(comp_page, fragment); comp_cache_update_faultin_stats(); } --- 55,73 ---- { comp_cache_t * comp_page = fragment->comp_page; + int retval; if (!PageLocked(page)) BUG(); + if (TryLockPage(comp_page->page)) + BUG(); decompress_page(fragment, page); ! retval = comp_cache_free_locked(fragment); comp_cache_update_faultin_stats(); + + if (!retval) + return; + + UnlockPage(comp_page->page); } Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.17 retrieving revision 1.18 diff -C2 -r1.17 -r1.18 *** swapout.c 2002/01/14 12:05:08 1.17 --- swapout.c 2002/01/16 16:30:12 1.18 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-13 18:21:19 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! 
* Time-stamp: <2002-01-14 15:39:40 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 70,73 **** --- 70,75 ---- CompFragmentClearIO(swp_buffer->fragment); comp_cache_free(swp_buffer->fragment); + + swp_buffer->fragment = NULL; out: Index: vswap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v retrieving revision 1.16 retrieving revision 1.17 diff -C2 -r1.16 -r1.17 *** vswap.c 2002/01/14 12:05:08 1.16 --- vswap.c 2002/01/16 16:30:12 1.17 *************** *** 2,6 **** * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-01-13 19:08:24 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-01-14 16:58:59 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 215,224 **** if (!vswap_address(entry)) return; - - /* it's ok fragment == NULL since the vswap may have been - * freed in swp_free, but the fragment was only set to Freed */ - if (vswap_address[offset]->fragment == VSWAP_RESERVED) - BUG(); vswap_address[offset]->fragment = VSWAP_RESERVED; --- 215,222 ---- if (!vswap_address(entry)) return; + if (reserved(offset)) + BUG(); + vswap_address[offset]->fragment = VSWAP_RESERVED; *************** *** 229,232 **** --- 227,231 ---- estimated_free_space += fragment->compressed_size; + estimated_pages--; } *************** *** 257,277 **** estimated_free_space -= fragment->compressed_size; } - - inline void - swapin_vswap(comp_cache_t * comp_page, comp_cache_fragment_t * fragment) - { - swp_entry_t entry; - unsigned long offset; - - entry.val = fragment->index; - - if (!vswap_address(entry)) - return; - - offset = SWP_OFFSET(entry); - - vswap_address[offset]->fragment = VSWAP_RESERVED; - estimated_pages--; - } inline void --- 256,259 ---- |
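comp_cache_hash_init() above sizes the fragment hash by rounding the requested bytes up to a whole 2^order pages, then deriving the largest power-of-two entry count that fits. The same arithmetic, rehearsed as a self-contained user-space program (the page size and entry count here are assumptions):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            unsigned long wanted = 3 * 2048;        /* 3 * max_num_comp_pages */
            unsigned long bytes = wanted * sizeof(void *);
            unsigned long order = 0, tmp, htable_bits = 0;

            while ((PAGE_SIZE << order) < bytes)    /* smallest order that fits */
                    order++;

            tmp = (PAGE_SIZE << order) / sizeof(void *);
            while ((tmp >>= 1UL) != 0UL)            /* log2 of the entry capacity */
                    htable_bits++;

            printf("order = %lu, hash entries = %lu\n", order, 1UL << htable_bits);
            return 0;
    }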
From: Rodrigo S. de C. <rc...@us...> - 2002-01-16 16:30:15
Update of /cvsroot/linuxcompressed/linux/arch/i386 In directory usw-pr-cvs1:/tmp/cvs-serv2268/arch/i386 Modified Files: config.in Log Message: Simple changes regarding hash tables and some cleanups. - no need to enter a power-of-2 number for the maximum number of compressed pages any longer. The hash table size is not grown dynamically yet, but that will be done soon. - hash table initialization was changed to allocate contiguous pages for it, which greatly improves the performance of the related functions. - swapin_vswap() has been deleted; it is not needed. - comp_cache_free() was renamed to comp_cache_free_locked(), and the page passed as a parameter has to be locked. A new comp_cache_free() function was created to handle cases where the page is not yet locked. Index: config.in =================================================================== RCS file: /cvsroot/linuxcompressed/linux/arch/i386/config.in,v retrieving revision 1.12 retrieving revision 1.13 diff -C2 -r1.12 -r1.13 *** config.in 2002/01/04 22:24:06 1.12 --- config.in 2002/01/16 16:30:11 1.13 *************** *** 201,205 **** dep_bool ' Swap Out in Compressed Format (Null Padding)' CONFIG_COMP_SWAP $CONFIG_COMP_CACHE if [ "$CONFIG_COMP_CACHE" = "y" ]; then ! int 'Maximum Number of Compressed Pages - (Power of 2)' CONFIG_COMP_CACHE_SIZE 512 fi --- 201,205 ---- dep_bool ' Swap Out in Compressed Format (Null Padding)' CONFIG_COMP_SWAP $CONFIG_COMP_CACHE if [ "$CONFIG_COMP_CACHE" = "y" ]; then ! int 'Maximum Number of Compressed Pages' CONFIG_COMP_CACHE_SIZE 2048 fi |
From: Rodrigo S. de C. <rc...@us...> - 2002-01-14 12:05:11
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv13325/mm/comp_cache Modified Files: adaptivity.c aux.c free.c main.c swapin.c swapout.c vswap.c Log Message: This batch of changes still includes lots of cleanups and code rewrite to make it simpler. Perfomance increase has been noticed too. - number_of_pages in comp_cache_t removed. We can check if there are no fragments by fragments list. - vswap: no semaphore is needed. I have no idea why the functions {lock,unlock}_vswap has once been added. I can't see why they are needed. So they were removed. The same for real_entry field in struct vswap_address. - vswap: a new function has been added, namely add_fragment_vswap(), analogous to remove_fragment_vswap(). It's called from get_comp_cache_page() and it's a great hand to make things modular. - vm_enough_memory(): now we take into account compressed cache space when allowing an application to allocate memory. That is done calling a function named comp_cache_free_space() which returns, based upon the estimated_free_space, the number of pages that still can be compressed. - move_and_fix_fragments() deleted. comp_cache_free() has a new police to not move data to and fro all the time like before. We free the fragment but leave it there waiting for being merged with the free space. It's pretty simple, check the code. The new code has two new functions: merge_right_neighbour() and merge_left_neighbour(). - the fragments list is kept sorted by offset field. So, when freeing, we don't have to search for the next and previous fragments everytime. Since most of times it's just a plain list_add_tail() in get_comp_cache_page(), that makes the code simpler and nicer. - lookup_comp_cache() was partially rewritten, mainly due to the fact we won't sleep to get a lock on the comp_page. - find_and_lock_comp_page() function removed and find_nolock_comp_page() was renamed to find_comp_page(). All functions that previously called find_and_lock... now calls the find_comp_page() and locks the comp_page at once with TryLockPage(). - oom_kill() was fixed and takes into account the free space in compressed cache by calling comp_cache_available_space(). That avoids killing an application if we have space left in compressed cache yet. Index: adaptivity.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -r1.3 -r1.4 *** adaptivity.c 2002/01/07 17:48:29 1.3 --- adaptivity.c 2002/01/14 12:05:08 1.4 *************** *** 2,6 **** * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-01-07 11:18:58 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-01-12 14:21:34 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 25,29 **** /* if the comp_page is not empty, can't free it */ ! if (comp_page->number_of_pages) goto out; --- 25,29 ---- /* if the comp_page is not empty, can't free it */ ! if (!list_empty(&(comp_page->fragments))) goto out; *************** *** 61,65 **** /* we raced */ ! if (comp_page->number_of_pages) { UnlockPage(empty_comp_page->page); return retval; --- 61,65 ---- /* we raced */ ! 
if (!list_empty(&(comp_page->fragments))) { UnlockPage(empty_comp_page->page); return retval; Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.11 retrieving revision 1.12 diff -C2 -r1.11 -r1.12 *** aux.c 2002/01/10 12:39:31 1.11 --- aux.c 2002/01/14 12:05:08 1.12 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-01-08 16:33:30 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-01-13 16:16:07 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 176,180 **** inline comp_cache_t * ! find_nolock_comp_page(swp_entry_t entry, comp_cache_fragment_t ** fragment_out) { comp_cache_fragment_t * fragment; --- 176,180 ---- inline comp_cache_t * ! find_comp_page(swp_entry_t entry, comp_cache_fragment_t ** fragment_out) { comp_cache_fragment_t * fragment; *************** *** 192,272 **** } ! /* ! * Search for entry in comp cache array. ! */ ! comp_cache_t * ! find_and_lock_comp_page(swp_entry_t entry, comp_cache_fragment_t ** fragment_out) { ! comp_cache_t * comp_page; ! comp_cache_fragment_t * fragment = NULL, * aux_fragment = NULL; ! struct page * page; ! struct list_head * fragment_lh; ! ! repeat: ! comp_page = find_nolock_comp_page(entry, fragment_out); ! if (comp_page) { ! page = comp_page->page; ! ! page_cache_get(page); ! ! lock_page(page); ! ! if (!comp_page->page) ! goto again; ! ! for_each_fragment(fragment_lh, comp_page) { ! fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); ! ! if (fragment->index == entry.val) { ! aux_fragment = fragment; ! break; ! } ! } ! ! if (aux_fragment) { ! if (aux_fragment != *fragment_out) ! *fragment_out = fragment; ! ! page_cache_release(page); ! return comp_page; ! } ! ! again: ! UnlockPage(page); ! page_cache_release(page); ! goto repeat; ! } ! return NULL; ! } ! ! inline void ! move_and_fix_fragments(comp_cache_t * comp_page, ! unsigned short offset_from, ! unsigned short offset_to, ! unsigned short size_to_move) { ! comp_cache_fragment_t * fragment; ! struct list_head * fragment_lh; ! ! /* to make sure we are not beyond our boundaries */ ! if (offset_from + size_to_move > PAGE_SIZE) ! BUG(); ! ! memmove(page_address(comp_page->page) + offset_to, page_address(comp_page->page) + offset_from, size_to_move); ! ! /* update all used fragments offsets */ ! for_each_fragment(fragment_lh, comp_page) { ! fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); ! ! if (!fragment->index) ! BUG(); ! ! if (fragment->offset < offset_from) ! continue; ! ! if (fragment->offset >= (offset_from + size_to_move)) ! continue; ! ! fragment->offset -= (offset_from - offset_to); ! } } --- 192,199 ---- } ! inline int ! comp_cache_free_space(void) { ! return 2 * (estimated_free_space >> PAGE_SHIFT); } *************** *** 290,294 **** comp_cache_fragment_t * fragment, * aux_fragment; struct list_head * fragment_lh, * aux_fragment_lh; ! int counter = 0, used_space = 0; swp_entry_t entry; --- 217,221 ---- comp_cache_fragment_t * fragment, * aux_fragment; struct list_head * fragment_lh, * aux_fragment_lh; ! 
int used_space = 0; swp_entry_t entry; *************** *** 312,320 **** used_space += fragment->compressed_size; - counter++; } - - if (counter != comp_page->number_of_pages) - BUG(); if (comp_page->free_space != PAGE_SIZE - used_space) --- 239,243 ---- Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.15 retrieving revision 1.16 diff -C2 -r1.15 -r1.16 *** free.c 2002/01/10 12:39:31 1.15 --- free.c 2002/01/14 12:05:08 1.16 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-01-09 18:08:49 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-01-13 19:00:05 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 19,25 **** extern kmem_cache_t * fragment_cachep; ! extern inline void move_and_fix_fragments(comp_cache_t *, unsigned short, unsigned short, unsigned short); ! extern comp_cache_t * find_nolock_comp_page(swp_entry_t, comp_cache_fragment_t **); ! extern void remove_vswap(comp_cache_t *, comp_cache_fragment_t *); /* is fragment1 the left neighbour of fragment2? */ --- 19,23 ---- extern kmem_cache_t * fragment_cachep; ! extern void remove_fragment_vswap(comp_cache_fragment_t *); /* is fragment1 the left neighbour of fragment2? */ *************** *** 30,196 **** #define right_fragment(fragment1, fragment2) left_fragment(fragment2, fragment1) void ! comp_cache_free_nohash(comp_cache_fragment_t * fragment_to_free) { ! comp_cache_t * comp_page = fragment_to_free->comp_page; ! comp_cache_fragment_t * next_fragment, * previous_fragment, * fragment; ! struct list_head * fragment_lh, * fragment_to_free_lh; ! unsigned short offset_from, offset_to, size_to_move; if (!comp_page) BUG(); - if (!comp_page->page) BUG(); - - if (!PageLocked(comp_page->page)) - BUG(); - if (!PageCompCache(comp_page->page)) BUG(); ! ! if (!fragment_to_free) BUG(); ! if (not_compressed(fragment_to_free) && comp_page->free_space) BUG(); - - //check_all_fragments(comp_page); - - next_fragment = NULL; - previous_fragment = NULL; - fragment_to_free_lh = NULL; - - /* search the previous and the next fragment */ - for_each_fragment(fragment_lh, comp_page) { - fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); - - if (left_fragment(fragment_to_free, fragment)) { - /* corruption */ - if (next_fragment) - BUG(); - next_fragment = fragment; - } - - if (right_fragment(fragment_to_free, fragment)) { - /* corruption */ - if (previous_fragment) - BUG(); - previous_fragment = fragment; - } - - if (fragment == fragment_to_free) - fragment_to_free_lh = fragment_lh; ! /* corruption */ ! if (fragment->offset < fragment_to_free->offset && fragment->offset + fragment->compressed_size > fragment_to_free->offset) ! BUG(); ! } ! if (!fragment_to_free_lh) ! BUG(); ! //check_all_fragments(comp_page); ! /* simple case - no free space * 1 - one not compressed page * 2 - sum of all fragments = PAGE_SIZE */ if (!comp_page->free_space) { ! comp_page->free_offset = fragment_to_free->offset; ! goto out; } /* this fragment has the free space as its left neighbour */ ! if (comp_page->free_offset + comp_page->free_space == fragment_to_free->offset) { ! if (previous_fragment) ! BUG(); ! goto out; } /* this fragment has the free space as its right neighbour */ ! if (fragment_to_free->offset + fragment_to_free->compressed_size == comp_page->free_offset) { ! if (next_fragment) ! BUG(); ! 
comp_page->free_offset = fragment_to_free->offset; ! goto out; } /* we have used fragment(s) between the free space and the one we want to free */ ! if (comp_page->free_offset < fragment_to_free->offset) { ! if (comp_page->free_offset + comp_page->free_space >= fragment_to_free->offset) ! BUG(); ! offset_to = comp_page->free_offset; ! offset_from = comp_page->free_offset + comp_page->free_space; ! size_to_move = fragment_to_free->offset - offset_from; ! } ! else { ! if (fragment_to_free->offset + fragment_to_free->compressed_size >= comp_page->free_offset) ! BUG(); ! offset_to = fragment_to_free->offset; ! offset_from = fragment_to_free->offset + fragment_to_free->compressed_size; ! size_to_move = comp_page->free_offset - offset_from; ! } ! move_and_fix_fragments(comp_page, offset_from, offset_to, size_to_move); ! /* let's merge the fragments */ ! comp_page->free_offset = offset_to + size_to_move; ! out: ! remove_vswap(comp_page, fragment_to_free); ! ! if (comp_page->free_space > PAGE_SIZE) ! BUG(); ! ! /* free this fragments */ ! list_del(fragment_to_free_lh); ! remove_fragment_from_hash_table(fragment_to_free); ! remove_fragment_from_lru_queue(fragment_to_free); ! ! if (!comp_page->number_of_pages) ! BUG(); ! comp_page->free_space += fragment_to_free->compressed_size; ! comp_page->number_of_pages--; /* is this fragment waiting for swap out? let's not free it * now, but let's tell swap out path that it does not need IO * anymore because it has been freed (maybe due to swapin) */ ! if (CompFragmentIO(fragment_to_free)) { ! CompFragmentClearIO(fragment_to_free); ! return; ! } ! ! kmem_cache_free(fragment_cachep, (fragment_to_free)); ! } ! /** ! * - comp_cache_free: removes the fragment->comp_page from the avl ! * tree, frees the fragment and inserts it back into the avl tree if ! * we didn't shrink the cache ! * ! * - return value: nothing, but the fragment->comp_page->page will be ! * returned _unlocked_ ! * ! * @fragment: fragment which we are freeing */ ! inline void ! comp_cache_free(comp_cache_fragment_t * fragment) { ! comp_cache_t * comp_page = fragment->comp_page; - if (!PageLocked(comp_page->page)) - BUG(); ! /* remove from the free space hash table to update it */ ! remove_comp_page_from_hash_table(comp_page); ! ! /* effectively free it */ ! comp_cache_free_nohash(fragment); ! ! /* steal the page if we need to shrink the comp cache */ ! if (!shrink_comp_cache(comp_page)) { ! add_comp_page_to_hash_table(comp_page); ! UnlockPage(comp_page->page); ! 
} } --- 28,166 ---- #define right_fragment(fragment1, fragment2) left_fragment(fragment2, fragment1) + static inline void + merge_right_neighbour(comp_cache_fragment_t * fragment_to_free, comp_cache_fragment_t * right_fragment) + { + if (!right_fragment) + return; + + if (!right_fragment->index) { + fragment_to_free->compressed_size += right_fragment->compressed_size; + list_del(&(right_fragment->list)); + if (!CompFragmentTestandClearIO(right_fragment)) + kmem_cache_free(fragment_cachep, (right_fragment)); + + + } + } + + static inline void + merge_left_neighbour(comp_cache_fragment_t * fragment_to_free, comp_cache_fragment_t * left_fragment) + { + if (!left_fragment) + return; + + if (!left_fragment->index) { + fragment_to_free->offset = left_fragment->offset; + fragment_to_free->compressed_size += left_fragment->compressed_size; + + list_del(&(left_fragment->list)); + + if (!CompFragmentTestandClearIO(left_fragment)) + kmem_cache_free(fragment_cachep, (left_fragment)); + } + } + + static inline void + remove_fragment_from_comp_cache(comp_cache_fragment_t * fragment) + { + remove_fragment_vswap(fragment); + remove_fragment_from_hash_table(fragment); + remove_fragment_from_lru_queue(fragment); + } + void ! comp_cache_free(comp_cache_fragment_t * fragment) { ! comp_cache_t * comp_page; ! comp_cache_fragment_t * next_fragment, * previous_fragment; + if (!fragment) + BUG(); + comp_page = fragment->comp_page; if (!comp_page) BUG(); if (!comp_page->page) BUG(); if (!PageCompCache(comp_page->page)) BUG(); ! if (not_compressed(fragment) && comp_page->free_space) BUG(); ! if (TryLockPage(comp_page->page)) BUG(); ! /* remove from the free space hash table to update it */ ! remove_comp_page_from_hash_table(comp_page); ! /* fragment is added in the correct location to the comp_page ! * list (see get_comp_cache_page():swapout.c) */ ! next_fragment = NULL; ! if (fragment->list.next != &(comp_page->fragments)) ! next_fragment = list_entry(fragment->list.next, comp_cache_fragment_t, list); ! previous_fragment = NULL; ! if (fragment->list.prev != &(comp_page->fragments)) ! previous_fragment = list_entry(fragment->list.prev, comp_cache_fragment_t, list); ! /* simple case - no free space * 1 - one not compressed page * 2 - sum of all fragments = PAGE_SIZE */ if (!comp_page->free_space) { ! remove_fragment_from_comp_cache(fragment); ! comp_page->free_offset = fragment->offset; ! goto remove; } /* this fragment has the free space as its left neighbour */ ! if (comp_page->free_offset + comp_page->free_space == fragment->offset) { ! remove_fragment_from_comp_cache(fragment); ! ! merge_right_neighbour(fragment, next_fragment); ! goto remove; } /* this fragment has the free space as its right neighbour */ ! if (fragment->offset + fragment->compressed_size == comp_page->free_offset) { ! remove_fragment_from_comp_cache(fragment); ! ! merge_left_neighbour(fragment, previous_fragment); ! comp_page->free_offset = fragment->offset; ! goto remove; } /* we have used fragment(s) between the free space and the one we want to free */ ! remove_fragment_from_comp_cache(fragment); ! fragment->index = 0; ! merge_right_neighbour(fragment, next_fragment); ! merge_left_neighbour(fragment, previous_fragment); ! goto out; ! remove: ! /* remove the fragment from comp page */ ! list_del(&(fragment->list)); ! ! /* careful: that's not only the compressed size from this ! * fragment, but also the fragments that might have been ! * merged in merge_*_neighbour() functions above */ ! 
comp_page->free_space += fragment->compressed_size; /* is this fragment waiting for swap out? let's not free it * now, but let's tell swap out path that it does not need IO * anymore because it has been freed (maybe due to swapin) */ ! if (!CompFragmentTestandClearIO(fragment)) ! kmem_cache_free(fragment_cachep, (fragment)); ! out: ! /* steal the page if we need to shrink the comp cache */ ! if (shrink_comp_cache(comp_page)) ! return; ! add_comp_page_to_hash_table(comp_page); ! UnlockPage(comp_page->page); } *************** *** 209,220 **** * set the Freed bit, which will make it possible to be freed * later */ ! comp_page = find_nolock_comp_page(entry, &fragment); ! if (comp_page) { ! if (TryLockPage(comp_page->page)) ! BUG(); ! comp_cache_free(fragment); - } /* no virtual swap entry with a compressed page */ --- 179,186 ---- * set the Freed bit, which will make it possible to be freed * later */ ! comp_page = find_comp_page(entry, &fragment); ! if (comp_page) comp_cache_free(fragment); /* no virtual swap entry with a compressed page */ *************** *** 234,246 **** BUG(); - if (vswap_locked(vswap->offset)) - continue; - old_entry = SWP_ENTRY(COMP_CACHE_SWP_TYPE, vswap->offset); ! if (TryLockPage(vswap->fragment->comp_page->page)) { ! unlock_vswap(old_entry); continue; - } /* no swap cache page? ok, let's assign the real entry */ --- 200,207 ---- BUG(); old_entry = SWP_ENTRY(COMP_CACHE_SWP_TYPE, vswap->offset); ! if (TryLockPage(vswap->fragment->comp_page->page)) continue; /* no swap cache page? ok, let's assign the real entry */ *************** *** 253,257 **** /* couldn't lock the swap cache page, let's try * another page */ - unlock_vswap(old_entry); UnlockPage(vswap->fragment->comp_page->page); } --- 214,217 ---- *************** *** 279,287 **** vswap->fragment = VSWAP_RESERVED; - /* this info is just in the case some code path (eg swapin) - * need to know if a virtual swap entry has been set to a real - * swap entry. It will be cleared in comp_cache_swp_free() */ - vswap->real_entry.val = entry.val; - /* old_virtual_addressed_pte <- new real swap entry */ pte_list = vswap->pte_list; --- 239,242 ---- *************** *** 316,320 **** ptep_get_and_clear(pte_list->ptep); ! set_pte(pte_list->ptep, swp_entry_to_pte(entry)); swap_duplicate(entry); --- 271,275 ---- ptep_get_and_clear(pte_list->ptep); ! set_pte(pte_list->ptep, swp_entry_to_pte(entry)); swap_duplicate(entry); *************** *** 340,344 **** } - unlock_vswap(old_entry); UnlockPage(comp_page->page); --- 295,298 ---- Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.17 retrieving revision 1.18 diff -C2 -r1.17 -r1.18 *** main.c 2002/01/10 12:39:31 1.17 --- main.c 2002/01/14 12:05:08 1.18 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-07 16:08:23 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-11 18:55:28 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 156,160 **** (*comp_page)->free_offset = 0; - (*comp_page)->number_of_pages = 0; (*comp_page)->page = page; --- 156,159 ---- *************** *** 182,186 **** min_num_comp_pages = 0; ! printk("Starting compressed cache v0.21pre5 (%lu pages = %luk)\n", max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* initialize our data for the `test' compressed_page */ --- 181,185 ---- min_num_comp_pages = 0; ! 
printk("Starting compressed cache v0.21pre6 (%lu pages = %luk)\n", max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* initialize our data for the `test' compressed_page */ Index: swapin.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v retrieving revision 1.13 retrieving revision 1.14 diff -C2 -r1.13 -r1.14 *** swapin.c 2002/01/10 12:39:31 1.13 --- swapin.c 2002/01/14 12:05:08 1.14 *************** *** 2,6 **** * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-08 11:27:02 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-14 08:31:09 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 25,35 **** if (!comp_page || !comp_page->page) BUG(); - if (!PageCompCache(comp_page->page)) ! PAGE_BUG(comp_page->page); ! ! if (!PageLocked(comp_page->page)) ! PAGE_BUG(comp_page->page); ! if (!PageLocked(uncompressed_page)) BUG(); --- 25,30 ---- if (!comp_page || !comp_page->page) BUG(); if (!PageCompCache(comp_page->page)) ! BUG(); if (!PageLocked(uncompressed_page)) BUG(); *************** *** 53,78 **** * needed. * ! * return value: the decompressed page in locked state * @fragment: fragment to be freed * @uncompressed_page: the page which will store the uncompressed data */ ! static inline struct page * ! decompress_and_free_fragment(comp_cache_fragment_t * fragment, struct page * uncompressed_page) { comp_cache_t * comp_page = fragment->comp_page; ! if (!PageLocked(uncompressed_page)) ! BUG(); ! decompress_page(fragment, uncompressed_page); comp_cache_free(fragment); ! ! /* let's fix the virtual swap entry since we are swapping in */ ! swapin_vswap(comp_page, fragment); ! ! return uncompressed_page; } - #define real_swap_address_was_assigned() (real_entry.val != entry->val) - /** * lookup_comp_cache - looks up for this swap entry in swap cache and --- 48,68 ---- * needed. * ! * return value: nothing * @fragment: fragment to be freed * @uncompressed_page: the page which will store the uncompressed data */ ! static inline void ! decompress_and_free_fragment(comp_cache_fragment_t * fragment, struct page * page) { comp_cache_t * comp_page = fragment->comp_page; ! if (!PageLocked(page)) ! BUG(); ! decompress_page(fragment, page); comp_cache_free(fragment); ! swapin_vswap(comp_page, fragment); ! comp_cache_update_faultin_stats(); } /** * lookup_comp_cache - looks up for this swap entry in swap cache and *************** *** 84,141 **** lookup_comp_cache(swp_entry_t entry) { ! struct page * page, * new_page = NULL; comp_cache_t * comp_page = NULL; comp_cache_fragment_t * fragment = NULL; - unsigned long offset; - - offset = SWP_OFFSET(entry); try_swap_cache: /* it might have been compressed but not yet freed */ ! page = lookup_swap_cache(entry); ! ! comp_page = find_and_lock_comp_page(entry, &fragment); /* ok, found in swap cache */ ! if (page) goto found_swap_cache; - - /* we might have slept in find_and_lock above */ - page = lookup_swap_cache(entry); - - if (page) - goto found_swap_cache; ! if (!comp_page) ! goto out; ! ! if (!comp_page->page) ! BUG(); ! ! /* sanity check */ ! if (!PageCompCache(comp_page->page)) ! PAGE_BUG(comp_page->page); ! ! if (!new_page) { ! UnlockPage(comp_page->page); ! ! /* allocate page for decompression */ ! new_page = alloc_page(GFP_HIGHUSER); ! /* we may sleep to allocate the page above, so let us ! * see if somebody has not swapped in this page */ goto try_swap_cache; } ! 
if (TryLockPage(new_page)) BUG(); ! page = decompress_and_free_fragment(fragment, new_page); ! ! /* fragment freed, should never happen */ ! if (!page) BUG(); ! ! comp_cache_update_faultin_stats(); if (add_to_swap_cache(page, entry)) --- 74,113 ---- lookup_comp_cache(swp_entry_t entry) { ! struct page * swap_cache_page, * page = NULL; comp_cache_t * comp_page = NULL; comp_cache_fragment_t * fragment = NULL; try_swap_cache: /* it might have been compressed but not yet freed */ ! swap_cache_page = lookup_swap_cache(entry); ! comp_page = find_comp_page(entry, &fragment); /* ok, found in swap cache */ ! if (swap_cache_page) goto found_swap_cache; ! if (!comp_page) { ! if (vswap_address(entry)) ! BUG(); ! return NULL; ! } ! /* has the page already been allocated? if it's not, let's ! * allocate and try again since we may sleep */ ! if (!page) { ! page = alloc_page(GFP_HIGHUSER); goto try_swap_cache; } ! if (TryLockPage(page)) BUG(); ! /* sanity check */ ! if (!comp_page->page) BUG(); ! if (!PageCompCache(comp_page->page)) ! BUG(); ! ! decompress_and_free_fragment(fragment, page); if (add_to_swap_cache(page, entry)) *************** *** 144,153 **** set_page_dirty(page); - DEBUG_CHECK_COUNT; - - new_page = NULL; - UnlockPage(page); ! goto out; found_swap_cache: --- 116,122 ---- set_page_dirty(page); UnlockPage(page); ! ! return page; found_swap_cache: *************** *** 161,178 **** * gotta set the page dirty bit back to make it to be * compressed if needed. */ ! set_page_dirty(page); ! comp_cache_free(fragment); } ! out: ! /* let's free the page which has been allocated but not used */ ! if (new_page) ! page_cache_release(new_page); ! ! if (vswap_address(entry) && !page) ! BUG(); ! return page; } --- 130,142 ---- * gotta set the page dirty bit back to make it to be * compressed if needed. */ ! set_page_dirty(swap_cache_page); ! comp_cache_free(fragment); } ! if (page) ! page_cache_release(page); ! return swap_cache_page; } *************** *** 206,217 **** /* fall through */ try_comp_cache: ! comp_page = find_and_lock_comp_page(*entry, &fragment); - /* check the page_cache again, in case we stalled above. */ - page = __find_get_page(mapping, idx, page_hash(mapping, idx)); - - if (page) - goto found_swap_cache; - if (!comp_page) { if (new_page) { --- 170,175 ---- /* fall through */ try_comp_cache: ! comp_page = find_comp_page(*entry, &fragment); if (!comp_page) { if (new_page) { *************** *** 226,229 **** --- 184,190 ---- BUG(); + if (TryLockPage(comp_page->page)) + BUG(); + /* sanity check */ if (!PageCompCache(comp_page->page)) *************** *** 242,246 **** BUG(); ! page = decompress_and_free_fragment(fragment, new_page); comp_cache_update_faultin_stats(); --- 203,208 ---- BUG(); ! decompress_and_free_fragment(fragment, new_page); ! page = new_page; comp_cache_update_faultin_stats(); *************** *** 256,261 **** found_swap_cache: ! if (!comp_page && find_nolock_comp_page(*entry, &fragment)) ! comp_page = find_and_lock_comp_page(*entry, &fragment); if (comp_page) { --- 218,223 ---- found_swap_cache: ! if (!comp_page && find_comp_page(*entry, &fragment)) ! comp_page = find_comp_page(*entry, &fragment); if (comp_page) { Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.16 retrieving revision 1.17 diff -C2 -r1.16 -r1.17 *** swapout.c 2002/01/10 12:39:31 1.16 --- swapout.c 2002/01/14 12:05:08 1.17 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! 
* Time-stamp: <2002-01-09 19:05:54 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-13 18:21:19 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 23,32 **** struct list_head swp_free_buffer_head, swp_used_buffer_head; - #define swp_buffer_freed(swp_buffer) \ - (!PageLocked(swp_buffer->page) && list_empty(&swp_buffer->free_list)) - - #define swp_buffer_used(swp_buffer) \ - (page_count(swp_buffer->page) > 2 + !!swp_buffer->page->buffers) - /** * - find_free_swp_buffer - gets a swap buffer page. If there's a --- 23,26 ---- *************** *** 37,41 **** find_free_swp_buffer(comp_cache_fragment_t * fragment) { ! struct page * buffer_page, * page; struct list_head * swp_buffer_lh, * tmp_lh; struct swp_buffer * swp_buffer; --- 31,35 ---- find_free_swp_buffer(comp_cache_fragment_t * fragment) { ! struct page * buffer_page; struct list_head * swp_buffer_lh, * tmp_lh; struct swp_buffer * swp_buffer; *************** *** 62,75 **** * in? so let's free only the fragment struct */ if (!CompFragmentIO(swp_buffer->fragment)) { ! kmem_cache_free(fragment_cachep, (swp_buffer->fragment)); goto out; } ! /* it's not swapped out, so let' free it */ ! page = swp_buffer->fragment->comp_page->page; ! ! if (TryLockPage(page)) ! BUG(); CompFragmentClearIO(swp_buffer->fragment); comp_cache_free(swp_buffer->fragment); --- 56,71 ---- * in? so let's free only the fragment struct */ if (!CompFragmentIO(swp_buffer->fragment)) { ! kmem_cache_free(fragment_cachep, (swp_buffer->fragment)); goto out; } ! /* in the case it is waiting for merge in ! * comp_cache_free(), we can't free it */ ! if (!swp_buffer->fragment->index) { ! CompFragmentClearIO(swp_buffer->fragment); ! goto out; ! } + /* it's not swapped out, so let' free it */ CompFragmentClearIO(swp_buffer->fragment); comp_cache_free(swp_buffer->fragment); *************** *** 169,174 **** struct page * page; swp_entry_t entry; ! ! maxscan = 10; next_fragment = &lru_queue; --- 165,170 ---- struct page * page; swp_entry_t entry; ! ! maxscan = SWAP_CLUSTER_MAX; next_fragment = &lru_queue; *************** *** 211,215 **** } ! extern void comp_cache_free_nohash(comp_cache_fragment_t *); /** --- 207,211 ---- } ! extern void add_fragment_vswap(comp_cache_fragment_t *); /** *************** *** 227,237 **** get_comp_cache_page(struct page * swap_cache_page, unsigned short compressed_size, comp_cache_fragment_t ** fragment_out) { - struct list_head * fragment_lh = NULL, * temp_lh; comp_cache_t * comp_page = NULL; ! comp_cache_fragment_t * fragment = NULL; ! swp_entry_t entry; unsigned short aux_comp_size; int maxscan, maxtry; - unsigned long offset; if (!swap_cache_page) --- 223,231 ---- get_comp_cache_page(struct page * swap_cache_page, unsigned short compressed_size, comp_cache_fragment_t ** fragment_out) { comp_cache_t * comp_page = NULL; ! comp_cache_fragment_t * fragment = NULL, * previous_fragment = NULL; ! 
struct list_head * fragment_lh; unsigned short aux_comp_size; int maxscan, maxtry; if (!swap_cache_page) *************** *** 286,294 **** BUG(); - /* this entry is not used any longer, so we can free - * it for our use */ - if (!comp_page->number_of_pages) - goto reset; - goto check_references; } --- 280,283 ---- *************** *** 299,318 **** panic("no space in compressed cache whatsoever!\n"); - reset: - /* let's get rid of any fragment left */ - for_each_fragment_safe(fragment_lh, temp_lh, comp_page) { - fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); - - list_del(fragment_lh); - remove_fragment_from_hash_table(fragment); - remove_fragment_from_lru_queue(fragment); - kmem_cache_free(fragment_cachep, (fragment)); - } - - if (!list_empty(&(comp_page->fragments))) - BUG(); - - goto check_references; - new_page: /* remove from free space hash table before update */ --- 288,291 ---- *************** *** 365,398 **** fragment->comp_page = comp_page; - //check_all_fragments(comp_page); - /* let's update some important fields */ comp_page->free_space -= compressed_size; comp_page->free_offset += compressed_size; - comp_page->number_of_pages++; last_page_size[last_page] = compressed_size; last_page = (last_page++)%NUM_MEAN_PAGES; - - entry.val = fragment->index; - if (vswap_address(entry)) { - offset = SWP_OFFSET(entry); ! if (reserved(offset)) ! estimated_pages++; ! ! estimated_free_space -= compressed_size; ! vswap_address[offset]->fragment = fragment; ! if (!list_empty(&(vswap_address[offset]->list))) ! BUG(); ! list_add(&(vswap_address[offset]->list), &vswap_address_used_head); } ! /* add the fragment to the comp_page list of fragments */ ! list_add(&(fragment->list), &(comp_page->fragments)); /* only real swap adressed fragments are added to lru queue */ add_fragment_to_hash_table(fragment); --- 338,378 ---- fragment->comp_page = comp_page; /* let's update some important fields */ comp_page->free_space -= compressed_size; comp_page->free_offset += compressed_size; last_page_size[last_page] = compressed_size; last_page = (last_page++)%NUM_MEAN_PAGES; ! add_fragment_vswap(fragment); ! ! /* add the fragment to the comp_page list of fragments */ ! previous_fragment = list_entry(comp_page->fragments.prev, comp_cache_fragment_t, list); ! if (previous_fragment->offset + previous_fragment->compressed_size == fragment->offset) { ! list_add_tail(&(fragment->list), &(comp_page->fragments)); ! goto out; ! } ! /* let's search for the correct place in the comp_page list */ ! previous_fragment = NULL; ! ! for_each_fragment(fragment_lh, comp_page) { ! comp_cache_fragment_t * aux_fragment; ! aux_fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); ! ! if (aux_fragment->offset + aux_fragment->compressed_size > fragment->offset) ! break; ! ! previous_fragment = aux_fragment; } ! if (previous_fragment) ! list_add(&(fragment->list), &(previous_fragment->list)); ! else ! list_add(&(fragment->list), &(comp_page->fragments)); + out: /* only real swap adressed fragments are added to lru queue */ add_fragment_to_hash_table(fragment); Index: vswap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v retrieving revision 1.15 retrieving revision 1.16 diff -C2 -r1.15 -r1.16 *** vswap.c 2002/01/10 12:39:31 1.15 --- vswap.c 2002/01/14 12:05:08 1.16 *************** *** 2,6 **** * linux/mm/comp_cache/vswap.c * ! 
* Time-stamp: <2002-01-08 11:20:31 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-01-13 19:08:24 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 68,96 **** } - inline void - lock_vswap(swp_entry_t entry) { - unsigned long offset; - - if (!vswap_address(entry)) - return; - - offset = SWP_OFFSET(entry); - - down(&vswap_address[offset]->sem); - } - - inline void - unlock_vswap(swp_entry_t entry) { - unsigned long offset; - - if (!vswap_address(entry)) - return; - - offset = SWP_OFFSET(entry); - - up(&vswap_address[offset]->sem); - } - - swp_entry_t get_virtual_swap_page(struct page * page, unsigned short count) --- 68,71 ---- *************** *** 124,128 **** vswap_address[offset]->count = count; - vswap_address[offset]->real_entry.val = 0; vswap_address[offset]->fragment = VSWAP_RESERVED; vswap_address[offset]->pte_list = NULL; --- 99,102 ---- *************** *** 189,195 **** comp_page = fragment->comp_page; - if (TryLockPage(comp_page->page)) - BUG(); - comp_cache_free(fragment); --- 163,166 ---- *************** *** 207,211 **** vswap->count = 0; - vswap->real_entry.val = 0; vswap->fragment = NULL; vswap->pte_list = NULL; --- 178,181 ---- *************** *** 235,239 **** inline void ! remove_vswap(comp_cache_t * comp_page, comp_cache_fragment_t * fragment) { swp_entry_t entry; --- 205,209 ---- inline void ! remove_fragment_vswap(comp_cache_fragment_t * fragment) { swp_entry_t entry; *************** *** 246,251 **** return; - estimated_free_space += fragment->compressed_size; - /* it's ok fragment == NULL since the vswap may have been * freed in swp_free, but the fragment was only set to Freed */ --- 216,219 ---- *************** *** 259,262 **** --- 227,259 ---- * address */ list_del_init(&(vswap_address[offset]->list)); + + estimated_free_space += fragment->compressed_size; + } + + inline void + add_fragment_vswap(comp_cache_fragment_t * fragment) + { + swp_entry_t entry; + unsigned long offset; + + entry.val = fragment->index; + offset = SWP_OFFSET(entry); + + if (!vswap_address(entry)) + return; + + offset = SWP_OFFSET(entry); + + if (reserved(offset)) + estimated_pages++; + + vswap_address[offset]->fragment = fragment; + + if (!list_empty(&(vswap_address[offset]->list))) + BUG(); + + list_add(&(vswap_address[offset]->list), &vswap_address_used_head); + + estimated_free_space -= fragment->compressed_size; } *************** *** 399,409 **** vswap_address[i]->offset = i; - vswap_address[i]->real_entry.val = 0; vswap_address[i]->pte_list = NULL; vswap_address[i]->swap_cache_page = NULL; vswap_address[i]->fragment = NULL; - init_MUTEX(&vswap_address[i]->sem); - list_add(&(vswap_address[i]->list), &vswap_address_free_head); } --- 396,403 ---- |
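As promised above, a simplified sketch of the sorted-by-offset insertion; struct frag and insert_sorted() are stand-ins for comp_cache_fragment_t and the real logic in get_comp_cache_page(), which additionally special-cases the common append-at-tail path:

#include <linux/list.h>

struct frag {
	struct list_head list;	/* links the fragments of one comp page */
	unsigned short offset;	/* byte offset inside the comp page */
	unsigned short size;	/* compressed size of the fragment */
};

/* keep the list ordered by offset, so that freeing a fragment can
 * read both neighbours straight from list.prev and list.next */
static void insert_sorted(struct list_head * fragments, struct frag * new)
{
	struct frag * prev = NULL, * aux;
	struct list_head * lh;

	list_for_each(lh, fragments) {
		aux = list_entry(lh, struct frag, list);
		if (aux->offset + aux->size > new->offset)
			break;		/* first fragment past the new one */
		prev = aux;
	}

	if (prev)
		list_add(&new->list, &prev->list);	/* insert after prev */
	else
		list_add(&new->list, fragments);	/* new list head */
}

With the list kept in this order, comp_cache_free() no longer has to scan for the previous and next fragments, which is exactly the simplification the message describes.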
From: Rodrigo S. de C. <rc...@us...> - 2002-01-14 12:05:11
|
Update of /cvsroot/linuxcompressed/linux/mm In directory usw-pr-cvs1:/tmp/cvs-serv13325/mm Modified Files: memory.c mmap.c Added Files: oom_kill.c Log Message: This batch of changes still includes lots of cleanups and code rewrites to make the code simpler. A performance increase has been noticed too. - number_of_pages in comp_cache_t removed. We can check whether there are no fragments via the fragments list. - vswap: no semaphore is needed. I have no idea why the {lock,unlock}_vswap functions were once added; I can't see why they are needed, so they were removed. The same goes for the real_entry field in struct vswap_address. - vswap: a new function has been added, namely add_fragment_vswap(), analogous to remove_fragment_vswap(). It's called from get_comp_cache_page() and helps a great deal in keeping things modular. - vm_enough_memory(): now we take compressed cache space into account when allowing an application to allocate memory. That is done by calling a function named comp_cache_free_space(), which returns, based upon estimated_free_space, the number of pages that can still be compressed (an annotated sketch of this accounting follows this message). - move_and_fix_fragments() deleted. comp_cache_free() has a new policy of not moving data to and fro all the time like before. We free the fragment but leave it there waiting to be merged with the free space. It's pretty simple, check the code. The new code has two new functions: merge_right_neighbour() and merge_left_neighbour(). - the fragments list is kept sorted by the offset field, so, when freeing, we don't have to search for the next and previous fragments every time. Since most of the time it's just a plain list_add_tail() in get_comp_cache_page(), that makes the code simpler and nicer. - lookup_comp_cache() was partially rewritten, mainly because we no longer sleep to get a lock on the comp_page. - find_and_lock_comp_page() function removed and find_nolock_comp_page() renamed to find_comp_page(). All functions that previously called find_and_lock... now call find_comp_page() and lock the comp_page at once with TryLockPage(). - oom_kill() was fixed and takes into account the free space in the compressed cache by calling comp_cache_available_space(). That avoids killing an application if there is still space left in the compressed cache. Index: memory.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v retrieving revision 1.16 retrieving revision 1.17 diff -C2 -r1.16 -r1.17 *** memory.c 2001/12/21 18:33:11 1.16 --- memory.c 2002/01/14 12:05:08 1.17 *************** *** 1081,1087 **** for (i = 0; i < num; offset++, i++) { /* Ok, do the async read-ahead now */ - lock_vswap(SWP_ENTRY(SWP_TYPE(entry), offset)); new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset)); - unlock_vswap(SWP_ENTRY(SWP_TYPE(entry), offset)); if (!new_page) --- 1081,1085 ---- *************** *** 1106,1116 **** spin_unlock(&mm->page_table_lock); - lock_vswap(entry); - - if (!pte_same(*page_table, orig_pte)) { - unlock_vswap(entry); - return 1; - } - page = lookup_comp_cache(entry); --- 1104,1107 ---- *************** *** 1147,1151 **** unlock_page(page); page_cache_release(page); - unlock_vswap(entry); return 1; } --- 1138,1141 ---- *************** *** 1155,1159 **** remove_pte_vswap(page_table); - unlock_vswap(entry); /* The page isn't present yet, go ahead with the fault.
*/ --- 1145,1148 ---- Index: mmap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/mmap.c,v retrieving revision 1.5 retrieving revision 1.6 diff -C2 -r1.5 -r1.6 *** mmap.c 2001/12/13 19:12:58 1.5 --- mmap.c 2002/01/14 12:05:08 1.6 *************** *** 82,85 **** --- 82,88 ---- free += swapper_space.nrpages; + /* Let's count the free space left in compressed cache */ + free += comp_cache_free_space(); + /* * The code below doesn't account for free space in the inode |
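An annotated restatement of the accounting referred to above; comp_cache_free_space() and estimated_free_space are the project's names (see the aux.c diff in the previous message), while vm_enough_memory_sketch() is a hypothetical condensation of the mmap.c change, and the 2:1 compression ratio is the assumption implied by the factor of 2:

extern unsigned long estimated_free_space;	/* bytes believed free in the cache */

static inline int comp_cache_free_space(void)
{
	/* free compressed-cache pages times the assumed 2:1 ratio */
	return 2 * (estimated_free_space >> PAGE_SHIFT);
}

/* hypothetical condensation of the vm_enough_memory() change */
static int vm_enough_memory_sketch(long pages)
{
	unsigned long free = nr_free_pages();	/* truly free pages */

	free += swapper_space.nrpages;		/* reclaimable swap cache */
	free += comp_cache_free_space();	/* room left in the cache */

	return free > pages;
}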
From: Rodrigo S. de C. <rc...@us...> - 2002-01-14 12:05:11
|
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv13325/include/linux Modified Files: comp_cache.h Log Message: This batch of changes still includes lots of cleanups and code rewrites to make the code simpler. A performance increase has been noticed too. - number_of_pages in comp_cache_t removed. We can check whether there are no fragments via the fragments list. - vswap: no semaphore is needed. I have no idea why the {lock,unlock}_vswap functions were once added; I can't see why they are needed, so they were removed. The same goes for the real_entry field in struct vswap_address. - vswap: a new function has been added, namely add_fragment_vswap(), analogous to remove_fragment_vswap(). It's called from get_comp_cache_page() and helps a great deal in keeping things modular. - vm_enough_memory(): now we take compressed cache space into account when allowing an application to allocate memory. That is done by calling a function named comp_cache_free_space(), which returns, based upon estimated_free_space, the number of pages that can still be compressed. - move_and_fix_fragments() deleted. comp_cache_free() has a new policy of not moving data to and fro all the time like before. We free the fragment but leave it there waiting to be merged with the free space. It's pretty simple, check the code. The new code has two new functions: merge_right_neighbour() and merge_left_neighbour(). - the fragments list is kept sorted by the offset field, so, when freeing, we don't have to search for the next and previous fragments every time. Since most of the time it's just a plain list_add_tail() in get_comp_cache_page(), that makes the code simpler and nicer. - lookup_comp_cache() was partially rewritten, mainly because we no longer sleep to get a lock on the comp_page. - find_and_lock_comp_page() function removed and find_nolock_comp_page() renamed to find_comp_page(). All functions that previously called find_and_lock... now call find_comp_page() and lock the comp_page at once with TryLockPage(). - oom_kill() was fixed and takes into account the free space in the compressed cache by calling comp_cache_available_space(). That avoids killing an application if there is still space left in the compressed cache. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.40 retrieving revision 1.41 diff -C2 -r1.40 -r1.41 *** comp_cache.h 2002/01/10 12:39:30 1.40 --- comp_cache.h 2002/01/14 12:05:08 1.41 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-08 16:09:01 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * !
* Time-stamp: <2002-01-14 08:49:45 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 24,28 **** #include <asm/bitops.h> #include <asm/page.h> - #include <asm/semaphore.h> #include <linux/shmem_fs.h> --- 24,27 ---- *************** *** 64,70 **** /* fields for compression structure */ - unsigned short number_of_pages; unsigned short free_offset; - short free_space; --- 63,67 ---- *************** *** 118,121 **** --- 115,119 ---- #define CompFragmentSetIO(fragment) set_bit(CF_IO, &(fragment)->flags) #define CompFragmentTestandSetIO(fragment) test_and_set_bit(CF_IO, &(fragment)->flags) + #define CompFragmentTestandClearIO(fragment) test_and_clear_bit(CF_IO, &(fragment)->flags) #define CompFragmentClearIO(fragment) clear_bit(CF_IO, &(fragment)->flags) *************** *** 134,138 **** #define INF 0xffffffff ! #define NUM_SWP_BUFFERS (pager_daemon.swap_cluster * (1 << page_cluster)) /* do not change the fields order */ --- 132,136 ---- #define INF 0xffffffff ! #define NUM_SWP_BUFFERS 128 /* do not change the fields order */ *************** *** 351,356 **** struct list_head list; - swp_entry_t real_entry; - unsigned short count; unsigned short offset; --- 349,352 ---- *************** *** 361,365 **** struct pte_list * pte_list; - struct semaphore sem; }; --- 357,360 ---- *************** *** 375,380 **** #define vswap_info_struct(p) (p == &swap_info[COMP_CACHE_SWP_TYPE]) #define vswap_address(entry) (SWP_TYPE(entry) == COMP_CACHE_SWP_TYPE) - #define real_swap_address(offset) (vswap_address[offset]->real_entry.val) - #define vswap_locked(offset) (down_trylock(&vswap_address[offset]->sem)) #define reserved(offset) (vswap_address[offset]->fragment == VSWAP_RESERVED) --- 370,373 ---- *************** *** 394,400 **** inline void del_swap_cache_page_vswap(struct page *); - inline void lock_vswap(swp_entry_t); - inline void unlock_vswap(swp_entry_t); - #else --- 387,390 ---- *************** *** 428,434 **** static inline void del_swap_cache_page_vswap(struct page * page) {}; - static inline void lock_vswap(swp_entry_t entry) {}; - static inline void unlock_vswap(swp_entry_t entry) {}; - #endif --- 418,421 ---- *************** *** 444,448 **** /* aux.c */ unsigned long long big_division(unsigned long long, unsigned long long); ! inline comp_cache_t * find_nolock_comp_page(swp_entry_t, comp_cache_fragment_t **); comp_cache_t * find_and_lock_comp_page(swp_entry_t, comp_cache_fragment_t **); --- 431,435 ---- /* aux.c */ unsigned long long big_division(unsigned long long, unsigned long long); ! inline comp_cache_t * find_comp_page(swp_entry_t, comp_cache_fragment_t **); comp_cache_t * find_and_lock_comp_page(swp_entry_t, comp_cache_fragment_t **); *************** *** 476,479 **** --- 463,479 ---- inline void add_fragment_to_lru_queue(comp_cache_fragment_t *); inline void remove_fragment_from_lru_queue(comp_cache_fragment_t *); + + /* enough memory functions */ + #ifdef CONFIG_COMP_CACHE + inline int comp_cache_free_space(void); + #else + + static inline int comp_cache_free_space(void) + { + return 0; + } + + #endif + /* proc.c */ |
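The CompFragmentTestandClearIO() macro added above supports an atomic handoff between the free path and the swap-out path (the IO bit itself is introduced in the next message below). A hedged sketch of that protocol, with kfree() standing in for kmem_cache_free() and all names simplified:

#include <linux/slab.h>
#include <asm/bitops.h>

#define CF_IO	2			/* bit index is illustrative */

struct fragment_sketch {
	unsigned long flags;
	/* ... compressed data bookkeeping ... */
};

/* free path: if IO was still pending, just cancel it; the swap-out
 * path will notice the cleared bit and release the struct itself */
static void free_path(struct fragment_sketch * f)
{
	if (test_and_clear_bit(CF_IO, &f->flags))
		return;			/* swap-out side cleans up */
	kfree(f);			/* no IO pending: free right away */
}

/* swap-out path, once the write has finished: a cleared bit means
 * the fragment was freed (e.g. swapped in) while IO was in flight */
static void io_done_path(struct fragment_sketch * f)
{
	if (!test_and_clear_bit(CF_IO, &f->flags)) {
		kfree(f);		/* only the struct remains */
		return;
	}
	/* still live: the real code goes on to release the fragment's
	 * space in the compressed cache at this point */
}

Whichever side clears CF_IO second knows it is responsible for the fragment struct.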
From: Rodrigo S. de C. <rc...@us...> - 2002-01-10 12:39:34
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv10033/mm/comp_cache Modified Files: aux.c free.c main.c swapin.c swapout.c vswap.c Log Message: This batch of changes includes cleanups and code rewrites. swap_out_fragments() and find_free_swp_buffer() are much simpler and also more efficient. Some preliminary tests showed a performance gain due to these improvements. - Fragment Freed Bit removed: the current code has been reviewed, and the few parts in the code where we could sleep with a comp page in locked state were rewritten (mainly swap out). It turns out that now we won't free a fragment without locking its page, and thus we don't need a bit to tell that the fragment needs to be freed. All the special cases for the Freed bit all over the code were removed. - Fragment SwapBuffer Bit removed: things changed a lot in the swap buffer code. We no longer add a "virtual" fragment to the comp cache hash table to avoid access to fragments which may be in the swap-out process. So, no need for this bit currently. - Fragment IO Bit added: instead of adding a "virtual" fragment to the comp cache hash table, we don't free the fragment we are swapping out right after the IO function (rw_swap_page) has been called. We free it only when the IO has finished. But since the IO takes time, this fragment can be freed (maybe because it's been swapped in) in the meantime, and so we have to tell comp_cache_free() that this fragment is being written to disk and that its struct should not be freed. comp_cache_free() also clears this bit, since we don't need to perform the IO (if it has not yet been submitted). In the case it has already been submitted, we don't have to free this fragment, only its struct. - find_free_swp_buffer() function was completely rewritten. There are two lists linking all the swap buffer pages: used and free. The comp page field in swp_buffer_t was removed. No array for swap buffers is needed, since they are linked in these lists (it's the first step towards making it work with dynamic swap buffers, as listed on the todo list). About its behaviour, there's no variable counting the number of free swap buffers like before. So, once all the buffers are used, we try to move all the unlocked buffers to the free list at once. If we couldn't move even one, then we wait for one page to have its IO finished and grab that swap buffer. Our old code didn't perform this task so efficiently. For example, every time we had a free fragment, we checked all the fragments to see if they were locked. With a used list we avoid this kind of overhead. - swap_out_fragments() was rewritten too. Part of the code was moved to the decompress_to_swp_buffer() function. Since we don't have to worry about the Freed bit, the code is much simpler. - find_*_comp_cache() functions no longer have special cases for Freed fragments. - since there's no number_of_free_swp_buffers variable and we don't hold a reference to the swap entry in swap_out_fragments(), there's no need to update swap buffers in end_buffer_io_async(). Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.10 retrieving revision 1.11 diff -C2 -r1.10 -r1.11 *** aux.c 2002/01/07 17:48:29 1.10 --- aux.c 2002/01/10 12:39:31 1.11 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-01-07 14:44:05 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * !
* Time-stamp: <2002-01-08 16:33:30 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 172,180 **** return; ! list_del(&(fragment->lru_queue)); } ! comp_cache_t * ! __find_nolock_comp_page(swp_entry_t entry, comp_cache_fragment_t ** fragment_out) { comp_cache_fragment_t * fragment; --- 172,180 ---- return; ! list_del_init(&(fragment->lru_queue)); } ! inline comp_cache_t * ! find_nolock_comp_page(swp_entry_t entry, comp_cache_fragment_t ** fragment_out) { comp_cache_fragment_t * fragment; *************** *** 183,210 **** for (fragment = fragment_hash[fragment_hashfn(entry)]; fragment != NULL; fragment = fragment->next_hash) { ! if (fragment->index == entry.val && !CompFragmentFreed(fragment)) { *fragment_out = fragment; return (fragment->comp_page); } } - return NULL; - } - - inline comp_cache_t * - find_nolock_comp_page(swp_entry_t entry, comp_cache_fragment_t ** fragment_out) - { - comp_cache_t * comp_page; ! comp_page = __find_nolock_comp_page(entry, fragment_out); ! ! if (!comp_page) ! return NULL; ! ! if (CompFragmentSwapBuffer(*fragment_out)) { ! *fragment_out = NULL; ! return NULL; ! } ! ! return comp_page; } --- 183,193 ---- for (fragment = fragment_hash[fragment_hashfn(entry)]; fragment != NULL; fragment = fragment->next_hash) { ! if (fragment->index == entry.val) { *fragment_out = fragment; return (fragment->comp_page); } } ! return NULL; } *************** *** 221,225 **** repeat: ! comp_page = __find_nolock_comp_page(entry, fragment_out); if (comp_page) { page = comp_page->page; --- 204,208 ---- repeat: ! comp_page = find_nolock_comp_page(entry, fragment_out); if (comp_page) { page = comp_page->page; *************** *** 235,252 **** fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); ! /* if it is a SwapBuffer fragment, there will ! * be only one, so we can return, once it can ! * have been used to another entry and ! * therefore the for loop may loop forever */ ! if (CompFragmentSwapBuffer(fragment)) { ! UnlockPage(page); ! page_cache_release(page); ! return NULL; ! } ! ! if (fragment->index == entry.val && !CompFragmentFreed(fragment)) { ! if (aux_fragment) ! BUG(); aux_fragment = fragment; } } --- 218,224 ---- fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); ! if (fragment->index == entry.val) { aux_fragment = fragment; + break; } } *************** *** 361,369 **** BUG(); ! if (aux_fragment->index == fragment->index) { ! if (CompFragmentFreed(aux_fragment) || CompFragmentFreed(fragment)) ! continue; BUG(); - } if (aux_fragment->offset < fragment->offset) { --- 333,338 ---- BUG(); ! if (aux_fragment->index == fragment->index) BUG(); if (aux_fragment->offset < fragment->offset) { Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.14 retrieving revision 1.15 diff -C2 -r1.14 -r1.15 *** free.c 2002/01/07 17:48:29 1.14 --- free.c 2002/01/10 12:39:31 1.15 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-01-07 12:39:59 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! 
* Time-stamp: <2002-01-09 18:08:49 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 38,42 **** struct list_head * fragment_lh, * fragment_to_free_lh; unsigned short offset_from, offset_to, size_to_move; - int freed = 0; if (!comp_page) --- 38,41 ---- *************** *** 55,61 **** BUG(); - if (CompFragmentSwapBuffer(fragment_to_free)) - BUG(); - if (not_compressed(fragment_to_free) && comp_page->free_space) BUG(); --- 54,57 ---- *************** *** 63,69 **** //check_all_fragments(comp_page); - if (CompFragmentFreed(fragment_to_free)) - freed = 1; - next_fragment = NULL; previous_fragment = NULL; --- 59,62 ---- *************** *** 156,161 **** remove_fragment_from_lru_queue(fragment_to_free); - kmem_cache_free(fragment_cachep, (fragment_to_free)); - if (!comp_page->number_of_pages) BUG(); --- 149,152 ---- *************** *** 163,166 **** --- 154,167 ---- comp_page->free_space += fragment_to_free->compressed_size; comp_page->number_of_pages--; + + /* is this fragment waiting for swap out? let's not free it + * now, but let's tell swap out path that it does not need IO + * anymore because it has been freed (maybe due to swapin) */ + if (CompFragmentIO(fragment_to_free)) { + CompFragmentClearIO(fragment_to_free); + return; + } + + kmem_cache_free(fragment_cachep, (fragment_to_free)); } *************** *** 211,224 **** if (comp_page) { ! if (!TryLockPage(comp_page->page)) { ! comp_cache_free(fragment); ! goto assign_address; ! } ! ! if (CompFragmentTestandSetFreed(fragment)) BUG(); } - - assign_address: /* no virtual swap entry with a compressed page */ --- 212,220 ---- if (comp_page) { ! if (TryLockPage(comp_page->page)) BUG(); + + comp_cache_free(fragment); } /* no virtual swap entry with a compressed page */ Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.16 retrieving revision 1.17 diff -C2 -r1.16 -r1.17 *** main.c 2002/01/07 17:48:29 1.16 --- main.c 2002/01/10 12:39:31 1.17 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-07 11:44:08 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-07 16:08:23 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 182,186 **** min_num_comp_pages = 0; ! printk("Starting compressed cache v0.21pre4 (%lu pages = %luk)\n", max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* initialize our data for the `test' compressed_page */ --- 182,186 ---- min_num_comp_pages = 0; ! printk("Starting compressed cache v0.21pre5 (%lu pages = %luk)\n", max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* initialize our data for the `test' compressed_page */ Index: swapin.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v retrieving revision 1.12 retrieving revision 1.13 diff -C2 -r1.12 -r1.13 *** swapin.c 2002/01/04 22:24:07 1.12 --- swapin.c 2002/01/10 12:39:31 1.13 *************** *** 2,6 **** * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-04 11:44:53 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapin.c * ! 
* Time-stamp: <2002-01-08 11:27:02 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 61,69 **** comp_cache_t * comp_page = fragment->comp_page; - if (CompFragmentFreed(fragment)) { - page_cache_release(uncompressed_page); - return NULL; - } - if (!PageLocked(uncompressed_page)) BUG(); --- 61,64 ---- Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.15 retrieving revision 1.16 diff -C2 -r1.15 -r1.16 *** swapout.c 2002/01/07 17:48:29 1.15 --- swapout.c 2002/01/10 12:39:31 1.16 *************** *** 1,6 **** /* ! * linux/mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-07 15:33:19 rcastro> * * Linux Virtual Memory Compressed Cache --- 1,6 ---- /* ! * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-09 19:05:54 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 21,42 **** /* swap buffer */ ! struct list_head swp_free_buffer_head; ! struct list_head swp_buffer_head; ! struct swp_buffer ** swp_buffer; ! atomic_t number_of_free_swp_buffers; ! static inline void ! free_swp_buffer(struct swp_buffer * swp_buffer) ! { ! comp_cache_fragment_t * fragment; ! ! UnlockPage((swp_buffer)->comp_page->page); ! fragment = list_entry(swp_buffer->comp_page->fragments.next, comp_cache_fragment_t, list); ! list_add(&(fragment->list), &swp_free_buffer_head); ! atomic_inc(&number_of_free_swp_buffers); ! } ! ! #define swp_buffer_not_used(page, fragment) (!PageLocked(page) && list_empty(&fragment->list) && (page_count(page) == 1 + !!page->buffers)) /** --- 21,31 ---- /* swap buffer */ ! struct list_head swp_free_buffer_head, swp_used_buffer_head; ! #define swp_buffer_freed(swp_buffer) \ ! (!PageLocked(swp_buffer->page) && list_empty(&swp_buffer->free_list)) ! #define swp_buffer_used(swp_buffer) \ ! (page_count(swp_buffer->page) > 2 + !!swp_buffer->page->buffers) /** *************** *** 46,133 **** * - return value: pointer to the page which will be returned locked */ static struct swp_buffer * ! find_free_swp_buffer(void) { ! static struct list_head * cur_swp_entry = &swp_buffer_head; ! struct page * buffer_page; ! struct list_head * fragment_lh; ! struct swp_buffer * aux_buffer; ! comp_cache_fragment_t * fragment; ! int i; - /* all swap out buffers are locked for asynchronous write? - * let's wait one of them finish. It is _not_ worth to have - * more buffers in order to avoid waiting for the page lock at - * this moment since we are gonna stall at rw_swap_page_base() - * waiting for the page IO completion anyway. */ if (!list_empty(&swp_free_buffer_head)) goto get_a_page; - - if (!atomic_read(&number_of_free_swp_buffers)) - goto wait_page; ! for (i = 0; i < NUM_SWP_BUFFERS; i++) { ! struct page * page; ! page = swp_buffer[i]->comp_page->page; ! ! fragment_lh = swp_buffer[i]->comp_page->fragments.next; ! fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); ! if (swp_buffer_not_used(page, fragment)) { ! /* remove the fragment from comp_page */ ! remove_fragment_from_hash_table(fragment); ! ! /* add this fragment to free swap buffers list */ ! list_add(&(fragment->list), &swp_free_buffer_head); } } ! if (list_empty(&swp_free_buffer_head)) ! goto wait_page; get_a_page: ! /* list field of struct page is used to implement our free ! * swap buffer page list. To add the page back (when IO is ! * finished), we only need the struct page pointer and ! * swp_buffer_head in order to call list_add() */ ! 
fragment = list_entry(fragment_lh = swp_free_buffer_head.next, comp_cache_fragment_t, list); ! aux_buffer = (struct swp_buffer *) &(fragment->comp_page); ! buffer_page = aux_buffer->comp_page->page; if (TryLockPage(buffer_page)) BUG(); ! out: ! atomic_dec(&number_of_free_swp_buffers); ! /* let's remove this page from free swap buffer pages list */ ! list_del_init(fragment_lh); ! if (!list_empty(fragment_lh)) ! BUG(); ! return (aux_buffer); ! wait_page: ! cur_swp_entry = cur_swp_entry->next; ! if (cur_swp_entry == &swp_buffer_head) ! cur_swp_entry = swp_buffer_head.next; ! ! aux_buffer = list_entry(cur_swp_entry, struct swp_buffer, list); ! fragment_lh = aux_buffer->comp_page->fragments.next; ! fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); ! buffer_page = aux_buffer->comp_page->page; ! lock_page(buffer_page); ! if (list_empty(&fragment->list)) { ! remove_fragment_from_hash_table(fragment); ! list_add(&(fragment->list), &swp_free_buffer_head); ! } ! goto out; } --- 35,153 ---- * - return value: pointer to the page which will be returned locked */ static struct swp_buffer * ! find_free_swp_buffer(comp_cache_fragment_t * fragment) { ! struct page * buffer_page, * page; ! struct list_head * swp_buffer_lh, * tmp_lh; ! struct swp_buffer * swp_buffer; ! int wait; ! ! CompFragmentSetIO(fragment); if (!list_empty(&swp_free_buffer_head)) goto get_a_page; ! wait = 0; ! try_again: ! list_for_each_safe(swp_buffer_lh, tmp_lh, &swp_used_buffer_head) { ! swp_buffer = list_entry(swp_buffer_lh, struct swp_buffer, list); ! if (PageLocked(swp_buffer->page)) { ! if (!wait) ! continue; ! list_del_init(swp_buffer_lh); ! wait_on_page(swp_buffer->page); ! } ! /* has the fragment we are swapping out been swapped ! * in? so let's free only the fragment struct */ ! if (!CompFragmentIO(swp_buffer->fragment)) { ! kmem_cache_free(fragment_cachep, (swp_buffer->fragment)); ! goto out; } + + /* it's not swapped out, so let' free it */ + page = swp_buffer->fragment->comp_page->page; + + if (TryLockPage(page)) + BUG(); + + CompFragmentClearIO(swp_buffer->fragment); + comp_cache_free(swp_buffer->fragment); + + out: + list_del(swp_buffer_lh); + list_add_tail(swp_buffer_lh, &swp_free_buffer_head); + + /* there's no need to swap out the original + * fragment any longer? so, let's forget it */ + if (!CompFragmentIO(fragment)) + return NULL; + + if (wait) + goto get_a_page; } ! /* couldn't free any swap buffer? so let's IO to finish */ ! if (list_empty(&swp_free_buffer_head)) { ! wait = 1; ! goto try_again; ! } get_a_page: ! swp_buffer = list_entry(swp_buffer_lh = swp_free_buffer_head.next, struct swp_buffer, list); ! buffer_page = swp_buffer->page; if (TryLockPage(buffer_page)) BUG(); + + list_del(swp_buffer_lh); + list_add_tail(swp_buffer_lh, &swp_used_buffer_head); ! buffer_page->index = fragment->index; ! swp_buffer->fragment = fragment; ! return (swp_buffer); ! } ! extern void decompress_page(comp_cache_fragment_t *, struct page *); ! static inline struct swp_buffer * decompress_to_swp_buffer(comp_cache_fragment_t * fragment) { ! struct page * buffer_page; ! struct swp_buffer * swp_buffer; ! swp_entry_t entry; ! entry = (swp_entry_t) { fragment->index }; ! swp_buffer = find_free_swp_buffer(fragment); ! /* no need for IO any longer */ ! if (!swp_buffer) ! return NULL; ! ! buffer_page = swp_buffer->page; ! if (!buffer_page) ! BUG(); ! ! if (TryLockPage(fragment->comp_page->page)) ! BUG(); ! #ifdef CONFIG_COMP_SWAP ! 
memcpy(page_address(buffer_page), page_address(fragment->comp_page->page) + fragment->offset, fragment->compressed_size); ! set_comp_swp_entry(entry, compressed(fragment), fragment_algorithm(fragment)); ! ! if (compressed(fragment) != swap_compressed(entry)) ! BUG(); ! ! if (swap_compressed(entry) && fragment_algorithm(fragment) != swap_algorithm(entry)) ! BUG(); ! #else ! decompress_page(fragment, buffer_page); ! #endif ! ! UnlockPage(fragment->comp_page->page); ! return swp_buffer; } *************** *** 137,142 **** extern struct address_space swapper_space; - extern void decompress_page(comp_cache_fragment_t *, struct page *); - /** * - swap_out_fragment - swap out some pages in the lru order until we --- 157,160 ---- *************** *** 147,154 **** struct list_head * fragment_lh, * next_fragment; int maxscan; ! comp_cache_fragment_t * fragment, * aux_fragment; ! comp_cache_t * comp_page = NULL; struct swp_buffer * swp_buffer; ! struct page * buffer_page, * page; swp_entry_t entry; --- 165,171 ---- struct list_head * fragment_lh, * next_fragment; int maxscan; ! comp_cache_fragment_t * fragment; struct swp_buffer * swp_buffer; ! struct page * page; swp_entry_t entry; *************** *** 161,190 **** entry.val = fragment->index; ! comp_page = fragment->comp_page; ! page = comp_page->page; - /* avoid to free this page in locked state (like what - * can be done in shrink_comp_cache) */ - page_cache_get(page); - - if (CompFragmentFreed(fragment)) { - if (!TryLockPage(page)) - comp_cache_free(fragment); - page_cache_release(page); - maxscan++; - continue; - } - if (vswap_address(entry)) BUG(); - if (!comp_page) - BUG(); - - /* this avoids problems if the swap entry is freed in - * middle of rw_swap_page(). This reference to the - * swap entry is released in end_buffer_io_async */ - swap_duplicate(entry); - /* page locked? move it to the back of the list */ if (TryLockPage(page)) { --- 178,186 ---- entry.val = fragment->index; ! page = fragment->comp_page->page; if (vswap_address(entry)) BUG(); /* page locked? move it to the back of the list */ if (TryLockPage(page)) { *************** *** 192,252 **** list_add(fragment_lh, &lru_queue); maxscan++; ! goto freed; } - - swp_buffer = find_free_swp_buffer(); - buffer_page = swp_buffer->comp_page->page; ! if (!buffer_page) ! BUG(); ! /* race: this is not supposed to happen unless we ! * sleep to lock the page in find_free_swp_buffer() */ ! if (CompFragmentFreed(fragment)) { ! free_swp_buffer(swp_buffer); ! comp_cache_free(fragment); ! goto freed; ! } ! #ifdef CONFIG_COMP_SWAP ! memcpy(page_address(buffer_page), page_address(comp_page->page) + fragment->offset, fragment->compressed_size); ! set_comp_swp_entry(entry, compressed(fragment), fragment_algorithm(fragment)); ! ! if (compressed(fragment) != swap_compressed(entry)) ! BUG(); ! ! if (swap_compressed(entry) && fragment_algorithm(fragment) != swap_algorithm(entry)) ! BUG(); ! #else ! decompress_page(fragment, buffer_page); ! #endif ! ! /* adding this aux_fragment to hash table implies that ! * the page will be found by our find* functions. In ! * particular, any function that tries to lock it will ! * sleep until the lock on this page is released. Even ! * though this page will not be returned by any ! * function, the function will only return when the ! * page is unlocked, ie the IO is over and it's safe ! * to the kernel to read the data from disk.*/ ! aux_fragment = list_entry(swp_buffer->comp_page->fragments.next, comp_cache_fragment_t, list); ! aux_fragment->index = entry.val; ! 
add_fragment_to_hash_table(aux_fragment); ! comp_cache_free(fragment); ! ! /* to fake the check present in rw_swap_page, the same ! * way is done in rw_swap_page_nolock() */ ! buffer_page->index = entry.val; ! rw_swap_page(WRITE, buffer_page); ! page_cache_release(page); ! continue; ! ! freed: swap_free(entry); - page_cache_release(page); - continue; } } --- 188,211 ---- list_add(fragment_lh, &lru_queue); maxscan++; ! continue; } ! remove_fragment_from_lru_queue(fragment); ! UnlockPage(page); ! /* avoid to free this entry if we sleep in the ! * function below */ ! swap_duplicate(entry); ! swp_buffer = decompress_to_swp_buffer(fragment); ! /* no need for IO */ ! if (!swp_buffer) ! goto out; ! rw_swap_page(WRITE, swp_buffer->page); ! out: swap_free(entry); } } *************** *** 307,318 **** } ! /* the page is locked, forget about it */ ! if (TryLockPage(comp_page->page)) { ! /* let's try the following page that has ! * free_space bigger than what we need */ ! if (comp_page->free_space < PAGE_SIZE) aux_comp_size = comp_page->free_space + 1; ! continue; } /* remove from free space hash table before update */ --- 266,282 ---- } ! aux_comp_size = 0; ! ! while (comp_page && TryLockPage(comp_page->page)) { ! if (aux_comp_size < comp_page->free_space) aux_comp_size = comp_page->free_space + 1; ! ! do { ! comp_page = comp_page->next_hash; ! } while (comp_page && comp_page->free_space < compressed_size); } + + if (!comp_page) + continue; /* remove from free space hash table before update */ *************** *** 393,405 **** BUG(); - update_fragment: - /* free any freed fragments in this comp_page */ - for_each_fragment_safe(fragment_lh, temp_lh, comp_page) { - fragment = list_entry(fragment_lh, comp_cache_fragment_t, list); - - if (CompFragmentFreed(fragment)) - comp_cache_free_nohash(fragment); - } - /* allocate the new fragment */ fragment = alloc_fragment(); --- 357,360 ---- *************** *** 461,501 **** void __init comp_cache_swp_buffer_init(void) { - comp_cache_fragment_t * fragment; - comp_cache_t * comp_page; struct page * buffer_page; int i; INIT_LIST_HEAD(&swp_free_buffer_head); ! INIT_LIST_HEAD(&swp_buffer_head); - swp_buffer = (struct swp_buffer **) kmalloc(NUM_SWP_BUFFERS * sizeof(struct swp_buffer *), GFP_KERNEL); - atomic_set(&number_of_free_swp_buffers, NUM_SWP_BUFFERS); - for (i = 0; i < NUM_SWP_BUFFERS; i++) { ! swp_buffer[i] = (struct swp_buffer *) kmalloc(sizeof(struct swp_buffer), GFP_KERNEL); ! comp_page = swp_buffer[i]->comp_page = alloc_comp_cache(); ! buffer_page = comp_page->page = alloc_page(GFP_KERNEL); - INIT_LIST_HEAD(&(comp_page->fragments)); - if (!buffer_page) panic("comp_cache_swp_buffer_init(): cannot allocate page"); - PageSetCompCache(buffer_page); buffer_page->mapping = &swapper_space; ! ! fragment = alloc_fragment(); ! fragment->comp_page = comp_page; ! fragment->compressed_size = PAGE_SIZE; ! fragment->flags = 0; ! ! comp_page->fragments.next = &fragment->list; ! comp_page->fragments.prev = &fragment->list; ! ! CompFragmentSetSwapBuffer(fragment); ! ! list_add(&(fragment->list), &swp_free_buffer_head); ! list_add(&(swp_buffer[i]->list), &swp_buffer_head); } } --- 416,436 ---- void __init comp_cache_swp_buffer_init(void) { struct page * buffer_page; + struct swp_buffer * swp_buffer; int i; INIT_LIST_HEAD(&swp_free_buffer_head); ! INIT_LIST_HEAD(&swp_used_buffer_head); for (i = 0; i < NUM_SWP_BUFFERS; i++) { ! swp_buffer = (struct swp_buffer *) kmalloc(sizeof(struct swp_buffer), GFP_KERNEL); ! 
buffer_page = swp_buffer->page = alloc_page(GFP_KERNEL); if (!buffer_page) panic("comp_cache_swp_buffer_init(): cannot allocate page"); buffer_page->mapping = &swapper_space; ! list_add(&(swp_buffer->list), &swp_free_buffer_head); } } Index: vswap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v retrieving revision 1.14 retrieving revision 1.15 diff -C2 -r1.14 -r1.15 *** vswap.c 2002/01/02 16:59:06 1.14 --- vswap.c 2002/01/10 12:39:31 1.15 *************** *** 2,6 **** * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2001-12-31 12:53:42 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-01-08 11:20:31 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 189,197 **** comp_page = fragment->comp_page; ! if (TryLockPage(comp_page->page)) { ! if (CompFragmentTestandSetFreed(fragment)) ! BUG(); ! goto out; ! } comp_cache_free(fragment); --- 189,194 ---- comp_page = fragment->comp_page; ! if (TryLockPage(comp_page->page)) ! BUG(); comp_cache_free(fragment); |
From: Rodrigo S. de C. <rc...@us...> - 2002-01-10 12:39:34
|
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv10033/mm
Removed Files: oom_kill.c
Log Message:
This batch of changes includes cleanups and code rewrites. swap_out_fragments() and find_free_swp_buffer() are now much simpler and more efficient; some preliminary tests showed a performance gain from these improvements.
- Fragment Freed bit removed: the current code has been checked, and the few places where we could sleep with a comp page in locked state were rewritten (mainly swap out). It turns out that we now never free a fragment without locking its page, so we no longer need a bit to flag a fragment as needing to be freed. All the special cases for the Freed bit were removed throughout the code.
- Fragment SwapBuffer bit removed: things changed a lot in the swap buffer code. We no longer add a "virtual" fragment to the comp cache hash table to block access to fragments that may be in the middle of a swap out, so this bit is currently not needed.
- Fragment IO bit added: instead of adding a "virtual" fragment to the comp cache hash table, we no longer free the fragment being swapped out right after the IO function (rw_swap_page) has been called; we free it only when the IO has finished. Since the IO takes time, the fragment may be freed in the meantime (for example, because it has been swapped in), so we must tell comp_cache_free() that the fragment is being written to disk and that its struct must not be freed. comp_cache_free() also clears this bit, since the IO does not need to be performed if it has not yet been submitted; if it has already been submitted, we free only the fragment's struct, not the fragment itself.
- find_free_swp_buffer() was completely rewritten. Two lists now link all the swap buffer pages: used and free. The comp page field in swp_buffer_t was removed, and no array of swap buffers is needed, since they are linked in these lists (a first step toward dynamic swap buffers, as listed on the todo list). As for its behaviour, there is no longer a variable counting the number of free swap buffers. Once all the buffers are in use, we try to move all the unlocked buffers to the free list at once; if we cannot move even one, we wait for one page to finish its IO and take that swap buffer. Our old code did not perform this task nearly as efficiently: for example, every time we needed a free buffer, we checked all the fragments to see whether they were locked. With a used list we avoid that kind of overhead.
- swap_out_fragments() was rewritten too. Part of its code moved to the decompress_to_swp_buffer() function. Since we no longer have to worry about the Freed bit, the code is much simpler.
- the find_*_comp_cache() functions no longer have special cases for Freed fragments.
- since the variable number_of_free_swp_buffers is gone and we no longer hold a reference to the swap entry in swap_out_fragments(), there is no need to update swap buffers in end_buffer_io_async().
--- oom_kill.c DELETED ---
|
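The CF_IO handshake described in the log message above is easy to get wrong, so here is a minimal userspace sketch of the protocol. The plain flag operations stand in for the kernel's atomic bit helpers (set_bit and friends), the freeing policy follows the description in the log, and everything else (struct layout, the two scenarios in main) is illustrative only, not the actual kernel code.

#include <stdio.h>

#define CF_IO (1UL << 1)   /* same bit index as in the diff */

struct fragment {
    unsigned long flags;   /* plain ops here; the kernel uses atomic bit helpers */
    int data_freed;        /* compressed payload released from the cache? */
    int struct_freed;      /* fragment struct released? */
};

/* Swap-out path: flag the fragment before the write is submitted. */
static void submit_swap_out(struct fragment *f)
{
    f->flags |= CF_IO;
    /* ... rw_swap_page(WRITE, ...) would be submitted here ... */
}

/* Swap-in path, standing in for comp_cache_free(): the payload is
 * taken back, and clearing CF_IO tells the writer that only the
 * struct is left for it to release. */
static void swap_in(struct fragment *f)
{
    f->flags &= ~CF_IO;
    f->data_freed = 1;
}

/* Writer side, once the IO has finished (or the buffer is recycled):
 * CF_IO still set means nobody swapped the page back in, so the whole
 * fragment can go; CF_IO clear means only the struct remains to free. */
static void io_finished(struct fragment *f)
{
    if (f->flags & CF_IO)
        f->data_freed = 1;
    f->struct_freed = 1;
}

int main(void)
{
    struct fragment a = { 0 }, b = { 0 };

    submit_swap_out(&a);           /* undisturbed write-out */
    io_finished(&a);

    submit_swap_out(&b);           /* write-out racing a swap-in */
    swap_in(&b);
    io_finished(&b);

    printf("a: data_freed=%d struct_freed=%d\n", a.data_freed, a.struct_freed);
    printf("b: data_freed=%d struct_freed=%d\n", b.data_freed, b.struct_freed);
    return 0;
}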
From: Rodrigo S. de C. <rc...@us...> - 2002-01-10 12:39:33
|
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv10033/include/linux
Modified Files: comp_cache.h
Log Message: (same log message as the mm/ check-in above)
Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.39 retrieving revision 1.40 diff -C2 -r1.39 -r1.40 *** comp_cache.h 2002/01/07 17:48:29 1.39 --- comp_cache.h 2002/01/10 12:39:30 1.40 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-07 15:33:53 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! 
* Time-stamp: <2002-01-08 16:09:01 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 110,143 **** extern atomic_t number_of_free_swp_buffers; - #ifdef CONFIG_COMP_CACHE - - static inline void update_swap_buffers(struct page * page) { - if (PageCompCache(page)) { - swap_free((swp_entry_t) { page->index }); - atomic_inc(&number_of_free_swp_buffers); - } - } - - #else - - static inline void update_swap_buffers(struct page * page) {}; - - #endif - /* comp fragment entry flags */ ! #define CF_Freed 0 ! #define CF_SwapBuffer 1 #define CF_WKdm 2 #define CF_WK4x4 3 ! #define CompFragmentFreed(fragment) test_bit(CF_Freed, &(fragment)->flags) ! #define CompFragmentSetFreed(fragment) set_bit(CF_Freed, &(fragment)->flags) ! #define CompFragmentTestandSetFreed(fragment) test_and_set_bit(CF_Freed, &(fragment)->flags) ! #define CompFragmentClearFreed(fragment) clear_bit(CF_Freed, &(fragment)->flags) ! ! #define CompFragmentSwapBuffer(fragment) test_bit(CF_SwapBuffer, &(fragment)->flags) ! #define CompFragmentSetSwapBuffer(fragment) set_bit(CF_SwapBuffer, &(fragment)->flags) ! #define CompFragmentTestandSetSwapBuffer(fragment) test_and_set_bit(CF_SwapBuffer, &(fragment)->flags) ! #define CompFragmentClearSwapBuffer(fragment) clear_bit(CF_SwapBuffer, &(fragment)->flags) #define CompFragmentWKdm(fragment) test_bit(CF_WKdm, &(fragment)->flags) --- 110,122 ---- extern atomic_t number_of_free_swp_buffers; /* comp fragment entry flags */ ! #define CF_IO 1 #define CF_WKdm 2 #define CF_WK4x4 3 ! #define CompFragmentIO(fragment) test_bit(CF_IO, &(fragment)->flags) ! #define CompFragmentSetIO(fragment) set_bit(CF_IO, &(fragment)->flags) ! #define CompFragmentTestandSetIO(fragment) test_and_set_bit(CF_IO, &(fragment)->flags) ! #define CompFragmentClearIO(fragment) clear_bit(CF_IO, &(fragment)->flags) #define CompFragmentWKdm(fragment) test_bit(CF_WKdm, &(fragment)->flags) *************** *** 159,164 **** /* do not change the fields order */ struct swp_buffer { - comp_cache_t * comp_page; struct list_head list; }; --- 138,145 ---- /* do not change the fields order */ struct swp_buffer { struct list_head list; + + struct page * page; /* page for IO */ + comp_cache_fragment_t * fragment; /* pointer to the fragment we are doing IO */ }; |
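The new two-field swp_buffer shown in the diff above lives on exactly one of two lists at any time (free or used). Below is a self-contained sketch of that recycling scheme, with the kernel's list primitives re-implemented minimally and the page/fragment pointers reduced to opaque handles; the reclaim of busy buffers described in the log message is omitted, so this is a shape sketch, not the real allocator.

#include <stddef.h>
#include <stdio.h>

/* Minimal circular doubly-linked list, modelled on the kernel's. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
    n->prev->next = n->next; n->next->prev = n->prev;
}

static int list_empty(const struct list_head *h) { return h->next == h; }

#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirrors the new struct from the diff, with opaque handles. */
struct swp_buffer {
    struct list_head list;
    void *page;
    void *fragment;
};

static struct list_head swp_free_buffer_head, swp_used_buffer_head;

/* Take a buffer from the free list and move it to the used list,
 * as find_free_swp_buffer() does once a free buffer exists. */
static struct swp_buffer *get_buffer(void *fragment)
{
    struct list_head *lh;
    struct swp_buffer *b;

    if (list_empty(&swp_free_buffer_head))
        return NULL;   /* the real code reclaims used buffers here */

    lh = swp_free_buffer_head.next;
    b = list_entry(lh, struct swp_buffer, list);
    list_del(lh);
    list_add_tail(lh, &swp_used_buffer_head);
    b->fragment = fragment;
    return b;
}

/* On IO completion the buffer simply goes back to the free list. */
static void put_buffer(struct swp_buffer *b)
{
    list_del(&b->list);
    list_add_tail(&b->list, &swp_free_buffer_head);
}

int main(void)
{
    struct swp_buffer bufs[4];

    INIT_LIST_HEAD(&swp_free_buffer_head);
    INIT_LIST_HEAD(&swp_used_buffer_head);
    for (int i = 0; i < 4; i++)
        list_add_tail(&bufs[i].list, &swp_free_buffer_head);

    struct swp_buffer *b = get_buffer((void *)0x1);
    printf("got buffer %p, used list empty? %d\n",
           (void *)b, list_empty(&swp_used_buffer_head));
    put_buffer(b);
    return 0;
}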
From: Rodrigo S. de C. <rc...@us...> - 2002-01-10 12:39:33
|
Update of /cvsroot/linuxcompressed/linux/fs
In directory usw-pr-cvs1:/tmp/cvs-serv10033/fs
Removed Files: buffer.c
Log Message: (same log message as the mm/ check-in above)
--- buffer.c DELETED ---
|
From: Rodrigo S. de C. <rc...@us...> - 2002-01-07 17:48:33
|
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv31165/include/linux
Modified Files: comp_cache.h
Log Message:
More changes regarding performance optimization.
- the AVL tree is finally removed. There is now a hash table for compressed cache entries instead, which improved our cache performance. comp_cache_struct shrank by 10 bytes with this change (actually 8, due to C struct padding).
- the search for duplicated fragments in get_comp_cache_page() was deleted, since it no longer makes sense. Calls to _find_nolock_comp_page() have fallen dramatically.
- comp_cache_notree() was renamed to comp_cache_nohash().
Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.38 retrieving revision 1.39 diff -C2 -r1.38 -r1.39 *** comp_cache.h 2002/01/04 22:24:07 1.38 --- comp_cache.h 2002/01/07 17:48:29 1.39 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-04 19:31:48 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-07 15:33:53 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 36,46 **** extern unsigned long real_num_comp_pages, new_num_comp_pages, max_num_comp_pages; - struct avl_info { - short comp_avl_height; - struct comp_cache_struct * comp_avl_left; - struct comp_cache_struct * comp_avl_right; - struct list_head avl_list; - }; - struct pte_list { struct pte_list * next; --- 36,39 ---- *************** *** 75,83 **** short free_space; - struct avl_info avl_free_space; struct list_head fragments; unsigned long flags; } comp_cache_t; --- 68,78 ---- short free_space; struct list_head fragments; unsigned long flags; + + struct comp_cache_struct * next_hash; + struct comp_cache_struct ** pprev_hash; } comp_cache_t; *************** *** 129,150 **** #endif - - /* avl.c */ - extern comp_cache_t * comp_avl_free_space; - - void comp_avl_insert (comp_cache_t *, comp_cache_t **, unsigned long, unsigned long); - int comp_avl_remove (comp_cache_t *, comp_cache_t **, unsigned long, unsigned long); - comp_cache_t * comp_search_avl_tree(unsigned short, comp_cache_t *, unsigned long, unsigned long); - - #define search_avl_tree_free_space(size) \ - comp_search_avl_tree(size, comp_avl_free_space, member_offset(free_space), member_offset(avl_free_space)) - - #define avl_insert_free_space(comp_page) \ - comp_avl_insert(comp_page, &comp_avl_free_space, member_offset(free_space), member_offset(avl_free_space)) - - #define avl_remove_free_space(comp_page) \ - comp_avl_remove(comp_page, &comp_avl_free_space, member_offset(free_space), member_offset(avl_free_space)) ! 
/* comp fragment entry flags */ #define CF_Freed 0 #define CF_SwapBuffer 1 *************** *** 494,497 **** --- 473,493 ---- inline void add_fragment_to_hash_table(comp_cache_fragment_t *); inline void remove_fragment_from_hash_table(comp_cache_fragment_t *); + + #define FREE_SPACE_INTERVAL 100 + #define FREE_SPACE_HASH_SIZE ((int) (PAGE_SIZE/FREE_SPACE_INTERVAL) + 2) + static inline int free_space_hashfn(int free_space) + { + if (!free_space) + return 0; + + free_space -= (free_space % FREE_SPACE_INTERVAL); + + return (free_space/FREE_SPACE_INTERVAL + 1); + } + + inline void add_comp_page_to_hash_table(comp_cache_t *); + inline void remove_comp_page_from_hash_table(comp_cache_t *); + comp_cache_t * search_comp_page_free_space(int); + extern struct list_head lru_queue; |
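The free-space hash introduced in the diff above buckets comp pages by how much room they have left, at a 100-byte granularity: bucket 0 holds pages with no free space, and bucket k (k >= 1) holds free space in [(k-1)*100, k*100). The function below is copied from the diff along with its constants; only the small standalone driver around it is added for illustration.

#include <stdio.h>

#define PAGE_SIZE 4096
#define FREE_SPACE_INTERVAL 100
#define FREE_SPACE_HASH_SIZE ((int) (PAGE_SIZE/FREE_SPACE_INTERVAL) + 2)

/* Same body as free_space_hashfn() in the diff. */
static int free_space_hashfn(int free_space)
{
    if (!free_space)
        return 0;

    free_space -= (free_space % FREE_SPACE_INTERVAL);

    return (free_space/FREE_SPACE_INTERVAL + 1);
}

int main(void)
{
    int sizes[] = { 0, 1, 99, 100, 250, 4096 };

    /* prints buckets 0, 1, 1, 2, 3 and 41 (table size is 42) */
    for (int i = 0; i < 6; i++)
        printf("free_space=%4d -> bucket %d of %d\n",
               sizes[i], free_space_hashfn(sizes[i]), FREE_SPACE_HASH_SIZE);
    return 0;
}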
From: Rodrigo S. de C. <rc...@us...> - 2002-01-07 17:48:33
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv31165/mm/comp_cache
Modified Files: Makefile adaptivity.c aux.c free.c main.c swapout.c Removed Files: avl.c
Log Message: (same log message as the include/linux check-in above)
Index: Makefile =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/Makefile,v retrieving revision 1.4 retrieving revision 1.5 diff -C2 -r1.4 -r1.5 *** Makefile 2001/12/28 21:45:24 1.4 --- Makefile 2002/01/07 17:48:29 1.5 *************** *** 5,9 **** O_TARGET := comp_cache.o ! obj-y := main.o vswap.o free.o swapout.o swapin.o adaptivity.o avl.o aux.o proc.o WK4x4.o WKdm.o ifeq ($(CONFIG_COMP_SWAP),y) --- 5,9 ---- O_TARGET := comp_cache.o ! obj-y := main.o vswap.o free.o swapout.o swapin.o adaptivity.o aux.o proc.o WK4x4.o WKdm.o ifeq ($(CONFIG_COMP_SWAP),y) Index: adaptivity.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v retrieving revision 1.2 retrieving revision 1.3 diff -C2 -r1.2 -r1.3 *** adaptivity.c 2002/01/02 16:59:05 1.2 --- adaptivity.c 2002/01/07 17:48:29 1.3 *************** *** 2,6 **** * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-01-02 14:42:12 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/adaptivity.c * ! * Time-stamp: <2002-01-07 11:18:58 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 53,57 **** check_empty_pages: /* let's look for empty compressed cache entries */ ! empty_comp_page = search_avl_tree_free_space(PAGE_SIZE); if (!empty_comp_page) --- 53,57 ---- check_empty_pages: /* let's look for empty compressed cache entries */ ! empty_comp_page = search_comp_page_free_space(PAGE_SIZE); if (!empty_comp_page) *************** *** 66,71 **** } ! if (!avl_remove_free_space(empty_comp_page)) ! BUG(); goto shrink; --- 66,70 ---- } ! remove_comp_page_from_hash_table(empty_comp_page); goto shrink; Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.9 retrieving revision 1.10 diff -C2 -r1.9 -r1.10 *** aux.c 2002/01/04 22:24:07 1.9 --- aux.c 2002/01/07 17:48:29 1.10 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! 
* Time-stamp: <2002-01-07 14:44:05 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 15,19 **** --- 15,21 ---- comp_cache_fragment_t * fragment_hash[FRAGMENT_HASH_SIZE]; + comp_cache_t * free_space_hash[FREE_SPACE_HASH_SIZE]; + /* computes (unsigned long long x) / (unsigned long long y) */ unsigned long long *************** *** 87,90 **** --- 89,149 ---- inline void + add_comp_page_to_hash_table(comp_cache_t * new_comp_page) { + comp_cache_t ** comp_page; + + comp_page = &free_space_hash[free_space_hashfn(new_comp_page->free_space)]; + + if ((new_comp_page->next_hash = *comp_page)) + (*comp_page)->pprev_hash = &new_comp_page->next_hash; + + *comp_page = new_comp_page; + new_comp_page->pprev_hash = comp_page; + } + + inline void + remove_comp_page_from_hash_table(comp_cache_t * comp_page) { + comp_cache_t *next = comp_page->next_hash; + comp_cache_t **pprev = comp_page->pprev_hash; + + if (next) + next->pprev_hash = pprev; + *pprev = next; + comp_page->pprev_hash = NULL; + } + + comp_cache_t * + search_comp_page_free_space(int free_space) { + comp_cache_t * comp_page; + int idx, i; + + idx = free_space_hashfn(free_space); + + if (idx == FREE_SPACE_HASH_SIZE) + goto check_exact_size; + + /* first of all let's try to get at once a comp page whose + * free space is surely bigger than what need */ + i = idx + 1; + do { + comp_page = free_space_hash[i++]; + } while(i < FREE_SPACE_HASH_SIZE && !comp_page); + + /* couldn't find a page? let's check the pages whose free + * space is linked in our hash key entry */ + if (!comp_page) + goto check_exact_size; + + return comp_page; + + check_exact_size: + comp_page = free_space_hash[idx]; + + while (comp_page && comp_page->free_space < free_space) + comp_page = comp_page->next_hash; + + return comp_page; + } + + inline void add_fragment_to_lru_queue(comp_cache_fragment_t * fragment) { swp_entry_t entry; *************** *** 98,102 **** return; ! list_add_tail(&(fragment->lru_queue), &lru_queue); } --- 157,161 ---- return; ! list_add(&(fragment->lru_queue), &lru_queue); } *************** *** 377,380 **** --- 436,443 ---- for (i = 0; i < FRAGMENT_HASH_SIZE; i++) fragment_hash[i] = NULL; + + /* inits comp cache free space hash table */ + for (i = 0; i < FREE_SPACE_HASH_SIZE; i++) + free_space_hash[i] = NULL; } Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.13 retrieving revision 1.14 diff -C2 -r1.13 -r1.14 *** free.c 2002/01/02 16:59:05 1.13 --- free.c 2002/01/07 17:48:29 1.14 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2001-12-31 12:38:32 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! * Time-stamp: <2002-01-07 12:39:59 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 32,36 **** void ! comp_cache_free_notree(comp_cache_fragment_t * fragment_to_free) { comp_cache_t * comp_page = fragment_to_free->comp_page; --- 32,36 ---- void ! 
comp_cache_free_nohash(comp_cache_fragment_t * fragment_to_free) { comp_cache_t * comp_page = fragment_to_free->comp_page; *************** *** 156,164 **** remove_fragment_from_lru_queue(fragment_to_free); - /* let's null the index to make sure any old reference to this - * fragment will fail when checking its index, like when - * waiting for the comp_page->page lock in - * swap_out_fragments */ - fragment_to_free->index = 0; kmem_cache_free(fragment_cachep, (fragment_to_free)); --- 156,159 ---- *************** *** 186,199 **** BUG(); ! /* remove from avl tree, since we have to update its ! * free_space */ ! if (!avl_remove_free_space(comp_page)) ! BUG(); ! comp_cache_free_notree(fragment); /* steal the page if we need to shrink the comp cache */ ! if (!shrink_comp_cache(comp_page)) { ! avl_insert_free_space(comp_page); UnlockPage(comp_page->page); } --- 181,193 ---- BUG(); ! /* remove from the free space hash table to update it */ ! remove_comp_page_from_hash_table(comp_page); ! /* effectively free it */ ! comp_cache_free_nohash(fragment); /* steal the page if we need to shrink the comp cache */ ! if (!shrink_comp_cache(comp_page)) { ! add_comp_page_to_hash_table(comp_page); UnlockPage(comp_page->page); } Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.15 retrieving revision 1.16 diff -C2 -r1.15 -r1.16 *** main.c 2002/01/04 22:24:07 1.15 --- main.c 2002/01/07 17:48:29 1.16 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-04 11:44:44 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-07 11:44:08 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 161,168 **** PageSetCompCache((*comp_page)->page); - INIT_LIST_HEAD(&((*comp_page)->avl_free_space.avl_list)); INIT_LIST_HEAD(&((*comp_page)->fragments)); ! ! avl_insert_free_space((*comp_page)); } --- 161,167 ---- PageSetCompCache((*comp_page)->page); INIT_LIST_HEAD(&((*comp_page)->fragments)); ! ! add_comp_page_to_hash_table((*comp_page)); } *************** *** 183,187 **** min_num_comp_pages = 0; ! printk("Starting compressed cache v0.21pre2 (%lu pages = %luk)\n", max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* initialize our data for the `test' compressed_page */ --- 182,186 ---- min_num_comp_pages = 0; ! printk("Starting compressed cache v0.21pre4 (%lu pages = %luk)\n", max_num_comp_pages, (max_num_comp_pages * PAGE_SIZE)/1024); /* initialize our data for the `test' compressed_page */ *************** *** 207,213 **** comp_cache_vswap_init(); - /* avl tree */ - comp_avl_free_space = NULL; - /* initialize each comp cache entry */ for (i = 0; i < real_num_comp_pages; i++) { --- 206,209 ---- Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.14 retrieving revision 1.15 diff -C2 -r1.14 -r1.15 *** swapout.c 2002/01/04 22:24:07 1.14 --- swapout.c 2002/01/07 17:48:29 1.15 *************** *** 2,6 **** * linux/mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-03 17:24:51 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-07 15:33:19 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 158,162 **** while (!list_empty(&lru_queue) && maxscan--) { ! 
fragment = list_entry(fragment_lh = lru_queue.next, comp_cache_fragment_t, lru_queue); entry.val = fragment->index; --- 158,162 ---- while (!list_empty(&lru_queue) && maxscan--) { ! fragment = list_entry(fragment_lh = lru_queue.prev, comp_cache_fragment_t, lru_queue); entry.val = fragment->index; *************** *** 190,194 **** if (TryLockPage(page)) { list_del(fragment_lh); ! list_add_tail(fragment_lh, &lru_queue); maxscan++; goto freed; --- 190,194 ---- if (TryLockPage(page)) { list_del(fragment_lh); ! list_add(fragment_lh, &lru_queue); maxscan++; goto freed; *************** *** 252,256 **** } ! extern void comp_cache_free_notree(comp_cache_fragment_t *); /** --- 252,256 ---- } ! extern void comp_cache_free_nohash(comp_cache_fragment_t *); /** *************** *** 269,274 **** { struct list_head * fragment_lh = NULL, * temp_lh; ! comp_cache_t * comp_page = NULL, * dup_comp_page; ! comp_cache_fragment_t * fragment = NULL, * dup_fragment; swp_entry_t entry; unsigned short aux_comp_size; --- 269,274 ---- { struct list_head * fragment_lh = NULL, * temp_lh; ! comp_cache_t * comp_page = NULL; ! comp_cache_fragment_t * fragment = NULL; swp_entry_t entry; unsigned short aux_comp_size; *************** *** 295,299 **** while (maxscan--) { ! comp_page = search_avl_tree_free_space(aux_comp_size); /* no comp_page, that comp_page->free_space > compressed_size */ --- 295,299 ---- while (maxscan--) { ! comp_page = search_comp_page_free_space(aux_comp_size); /* no comp_page, that comp_page->free_space > compressed_size */ *************** *** 316,322 **** } ! /* remove from AVL tree before updating */ ! if (!avl_remove_free_space(comp_page)) ! BUG(); if (comp_page->free_space < compressed_size) --- 316,321 ---- } ! /* remove from free space hash table before update */ ! remove_comp_page_from_hash_table(comp_page); if (comp_page->free_space < compressed_size) *************** *** 353,358 **** new_page: ! /* remove from AVL tree before updating*/ ! avl_remove_free_space(comp_page); if (comp_page->page) --- 352,357 ---- new_page: ! /* remove from free space hash table before update */ ! remove_comp_page_from_hash_table(comp_page); if (comp_page->page) *************** *** 386,390 **** * */ if (mapped(swap_cache_page)) { ! avl_insert_free_space(comp_page); UnlockPage(comp_page->page); return NULL; --- 385,389 ---- * */ if (mapped(swap_cache_page)) { ! add_comp_page_to_hash_table(comp_page); UnlockPage(comp_page->page); return NULL; *************** *** 394,418 **** BUG(); - /* before messing up the page, let's make sure there's other - * fragment with this entry. It can happen to be other - * compressed page with this entry, since we may have had a - * swap cache page with page_count > 2 (linus if clause - * above), we have cleaned it and later it has been dirtied - * again (since it may have returned to active list) */ - dup_comp_page = find_nolock_comp_page((swp_entry_t) { swap_cache_page->index }, &dup_fragment); - - /* there is another fragment, that should never happen */ - if (dup_comp_page) { - /* the fragment is in this comp_page or we couldn't lock it */ - if (dup_comp_page == comp_page || TryLockPage(dup_comp_page->page)) { - if (CompFragmentTestandSetFreed(dup_fragment)) - BUG(); - - goto update_fragment; - } - - comp_cache_free(dup_fragment); - } - update_fragment: /* free any freed fragments in this comp_page */ --- 393,396 ---- *************** *** 421,425 **** if (CompFragmentFreed(fragment)) ! comp_cache_free_notree(fragment); } --- 399,403 ---- if (CompFragmentFreed(fragment)) ! 
comp_cache_free_nohash(fragment); } *************** *** 471,475 **** *fragment_out = fragment; ! avl_insert_free_space(comp_page); if ((*fragment_out)->compressed_size != compressed_size) --- 449,453 ---- *fragment_out = fragment; ! add_comp_page_to_hash_table(comp_page); if ((*fragment_out)->compressed_size != compressed_size) --- avl.c DELETED --- |
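The hash table that replaces the AVL tree in this check-in uses the classic kernel chaining idiom visible in the diff: next_hash links the bucket chain, and pprev_hash points back at whichever pointer references the entry (the bucket slot or the previous entry's next_hash), so unlinking never rescans the bucket. Here is a standalone model of add_comp_page_to_hash_table()/remove_comp_page_from_hash_table(); the bucket index is passed in directly for brevity instead of calling free_space_hashfn().

#include <stdio.h>

struct comp_page {
    int free_space;
    struct comp_page *next_hash;
    struct comp_page **pprev_hash;
};

#define NBUCKETS 8
static struct comp_page *hash[NBUCKETS];

/* Same pointer dance as add_comp_page_to_hash_table() in the diff. */
static void add_to_hash(struct comp_page *np, int bucket)
{
    struct comp_page **slot = &hash[bucket];

    if ((np->next_hash = *slot))
        (*slot)->pprev_hash = &np->next_hash;
    *slot = np;
    np->pprev_hash = slot;
}

/* O(1) unlink, as in remove_comp_page_from_hash_table(). */
static void remove_from_hash(struct comp_page *p)
{
    struct comp_page *next = p->next_hash;
    struct comp_page **pprev = p->pprev_hash;

    if (next)
        next->pprev_hash = pprev;
    *pprev = next;
    p->pprev_hash = NULL;
}

int main(void)
{
    struct comp_page a = { 100 }, b = { 200 };

    add_to_hash(&a, 1);
    add_to_hash(&b, 1);        /* b now heads the chain */
    remove_from_hash(&b);      /* unlinked without scanning the bucket */
    printf("bucket 1 head free_space: %d\n", hash[1]->free_space);
    return 0;
}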
From: Rodrigo S. de C. <rc...@us...> - 2002-01-04 22:24:10
|
Update of /cvsroot/linuxcompressed/linux/Documentation
In directory usw-pr-cvs1:/tmp/cvs-serv25332/Documentation
Modified Files: Configure.help
Log Message:
Some changes regarding code optimization. These modifications greatly improved some tests (fillmem) we ran against the current code.
- the hash function was changed to be arch-independent (by using SWP_OFFSET) and was also improved (shifting one position to the left).
- the hash table size is now a function of CONFIG_COMP_CACHE_SIZE. It used to be a static value, and that hurt performance pretty badly for larger compressed cache sizes. This change breaks the compsize= kernel parameter, but I am not sure it is worth adding a variable to hold the hash table size, since the macro that uses this value (fragment_hashfn) is called many times.
- the option in the kernel config menu now warns that the maximum compressed cache size must be a power of two, as required by this hash table size. If it is not a power of two, the hash function breaks and performance drops heavily. To solve this, we could add a variable set to a reasonable power-of-two value derived from the maximum compressed cache size, but that runs into the problem explained above.
- swap_out_fragments() never sleeps to lock a page any more. If a page cannot be locked at once, it is moved to the end of the LRU list and the next one is tried.
- removed the checksum from comp_cache_fragment_t and the checksum computation from get_comp_cache_page() and decompress_page(); it added a huge overhead in our tests.
Index: Configure.help =================================================================== RCS file: /cvsroot/linuxcompressed/linux/Documentation/Configure.help,v retrieving revision 1.1 retrieving revision 1.2 diff -C2 -r1.1 -r1.2 *** Configure.help 2002/01/02 16:59:05 1.1 --- Configure.help 2002/01/04 22:24:06 1.2 *************** *** 400,404 **** Here you choose the maximum number of memory pages used by the Compressed Cache. If the number is greater than half of memory size, ! it will set to 512, the default value. The maximum value will be not necessarily used and can be configured --- 400,405 ---- Here you choose the maximum number of memory pages used by the Compressed Cache. If the number is greater than half of memory size, ! it will set to 512, the default value. The number must be a power of ! two. The maximum value will be not necessarily used and can be configured |
From: Rodrigo S. de C. <rc...@us...> - 2002-01-04 22:24:10
|
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv25332/include/linux
Modified Files: comp_cache.h
Log Message: (same log message as the Documentation check-in above)
Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.37 retrieving revision 1.38 diff -C2 -r1.37 -r1.38 *** comp_cache.h 2002/01/02 16:59:05 1.37 --- comp_cache.h 2002/01/04 22:24:07 1.38 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-02 12:06:10 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2002-01-04 19:31:48 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 29,32 **** --- 29,33 ---- #include <linux/WKcommon.h> + /* maximum compressed size of a page */ #define MAX_COMPRESSED_SIZE 4500 *************** *** 58,62 **** /* index == 0 && compressed_size != 0 => compressed_size = free_space */ unsigned short compressed_size; - unsigned int checksum; unsigned long flags; --- 59,62 ---- *************** *** 489,494 **** inline void check_all_fragments(comp_cache_t *); ! #define HTABLE_SIZE 32 ! #define hashfn(entry) ((entry >> 9) & (HTABLE_SIZE - 1)) inline void add_fragment_to_hash_table(comp_cache_fragment_t *); --- 489,494 ---- inline void check_all_fragments(comp_cache_t *); ! #define FRAGMENT_HASH_SIZE (CONFIG_COMP_CACHE_SIZE/8) ! #define fragment_hashfn(entry) ((SWP_OFFSET(entry) >> 2) & (FRAGMENT_HASH_SIZE - 1)) inline void add_fragment_to_hash_table(comp_cache_fragment_t *); |
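The power-of-two warning in this log message follows directly from the new fragment_hashfn above: masking with (FRAGMENT_HASH_SIZE - 1) only behaves like a modulo when the table size is a power of two; otherwise many buckets are simply unreachable. A quick standalone demonstration of just the mask (the key range and the sizes 512 and 500 are arbitrary examples; SWP_OFFSET is not modelled since only the mask matters here):

#include <stdio.h>

/* The mask step of fragment_hashfn(). */
static unsigned long mask_bucket(unsigned long key, unsigned long size)
{
    return key & (size - 1);
}

int main(void)
{
    unsigned long sizes[] = { 512, 500 };

    for (int s = 0; s < 2; s++) {
        char seen[512] = { 0 };
        int distinct = 0;

        /* feed in a dense run of keys and count reachable buckets */
        for (unsigned long key = 0; key < 4096; key++) {
            unsigned long b = mask_bucket(key, sizes[s]);
            if (b < sizes[s] && !seen[b]) {
                seen[b] = 1;
                distinct++;
            }
        }
        /* prints 512 of 512, but only 128 of 500 */
        printf("size %lu: %d of %lu buckets reachable\n",
               sizes[s], distinct, sizes[s]);
    }
    return 0;
}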
From: Rodrigo S. de C. <rc...@us...> - 2002-01-04 22:24:10
|
Update of /cvsroot/linuxcompressed/linux/arch/i386
In directory usw-pr-cvs1:/tmp/cvs-serv25332/arch/i386
Modified Files: config.in
Log Message: (same log message as the Documentation check-in above)
Index: config.in =================================================================== RCS file: /cvsroot/linuxcompressed/linux/arch/i386/config.in,v retrieving revision 1.11 retrieving revision 1.12 diff -C2 -r1.11 -r1.12 *** config.in 2001/12/28 21:45:24 1.11 --- config.in 2002/01/04 22:24:06 1.12 *************** *** 201,205 **** dep_bool ' Swap Out in Compressed Format (Null Padding)' CONFIG_COMP_SWAP $CONFIG_COMP_CACHE if [ "$CONFIG_COMP_CACHE" = "y" ]; then ! int 'Maximum Compressed Cache Size (Memory Pages)' CONFIG_COMP_CACHE_SIZE 512 fi --- 201,205 ---- dep_bool ' Swap Out in Compressed Format (Null Padding)' CONFIG_COMP_SWAP $CONFIG_COMP_CACHE if [ "$CONFIG_COMP_CACHE" = "y" ]; then ! int 'Maximum Number of Compressed Pages - (Power of 2)' CONFIG_COMP_CACHE_SIZE 512 fi |
From: Rodrigo S. de C. <rc...@us...> - 2002-01-04 22:24:10
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv25332/mm/comp_cache
Modified Files: aux.c main.c swapin.c swapout.c
Log Message: (same log message as the Documentation check-in above)
Index: aux.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v retrieving revision 1.8 retrieving revision 1.9 diff -C2 -r1.8 -r1.9 *** aux.c 2001/12/20 13:24:32 1.8 --- aux.c 2002/01/04 22:24:07 1.9 *************** *** 2,6 **** * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2001-12-20 10:33:21 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/aux.c * ! * Time-stamp: <2002-01-04 19:10:26 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 14,18 **** #include <linux/init.h> ! comp_cache_fragment_t * fragment_hash[HTABLE_SIZE]; /* computes (unsigned long long x) / (unsigned long long y) */ --- 14,18 ---- #include <linux/init.h> ! comp_cache_fragment_t * fragment_hash[FRAGMENT_HASH_SIZE]; /* computes (unsigned long long x) / (unsigned long long y) */ *************** *** 62,67 **** add_fragment_to_hash_table(comp_cache_fragment_t * new_fragment) { comp_cache_fragment_t ** fragment; ! fragment = &fragment_hash[hashfn(new_fragment->index)]; if ((new_fragment->next_hash = *fragment)) --- 62,70 ---- add_fragment_to_hash_table(comp_cache_fragment_t * new_fragment) { comp_cache_fragment_t ** fragment; + swp_entry_t entry; + + entry = (swp_entry_t) { new_fragment->index }; ! fragment = &fragment_hash[fragment_hashfn(entry)]; if ((new_fragment->next_hash = *fragment)) *************** *** 120,124 **** *fragment_out = NULL; ! 
for (fragment = fragment_hash[fragment_hashfn(entry)]; fragment != NULL; fragment = fragment->next_hash) { if (fragment->index == entry.val && !CompFragmentFreed(fragment)) { *fragment_out = fragment; *************** *** 372,376 **** /* inits fragment hash table */ ! for (i = 0; i < HTABLE_SIZE; i++) fragment_hash[i] = NULL; } --- 375,379 ---- /* inits fragment hash table */ ! for (i = 0; i < FRAGMENT_HASH_SIZE; i++) fragment_hash[i] = NULL; } Index: main.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v retrieving revision 1.14 retrieving revision 1.15 diff -C2 -r1.14 -r1.15 *** main.c 2002/01/02 16:59:05 1.14 --- main.c 2002/01/04 22:24:07 1.15 *************** *** 2,6 **** * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-02 10:33:08 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/main.c * ! * Time-stamp: <2002-01-04 11:44:44 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 100,104 **** } - fragment->checksum = csum_partial(page_address(swap_cache_page), PAGE_SIZE, 0); set_fragment_algorithm(fragment, algorithm); --- 100,103 ---- Index: swapin.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v retrieving revision 1.11 retrieving revision 1.12 diff -C2 -r1.11 -r1.12 *** swapin.c 2002/01/02 16:59:06 1.11 --- swapin.c 2002/01/04 22:24:07 1.12 *************** *** 2,6 **** * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2001-12-31 12:39:11 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapin.c * ! * Time-stamp: <2002-01-04 11:44:53 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 40,46 **** decompress(fragment_algorithm(fragment), page_address(comp_page->page) + fragment->offset, page_address(uncompressed_page)); - if (fragment->checksum != csum_partial(page_address(uncompressed_page), PAGE_SIZE, 0)) - BUG(); - return; --- 40,43 ---- Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.13 retrieving revision 1.14 diff -C2 -r1.13 -r1.14 *** swapout.c 2002/01/02 16:59:06 1.13 --- swapout.c 2002/01/04 22:24:07 1.14 *************** *** 2,6 **** * linux/mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-02 12:06:04 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/swapout.c * ! * Time-stamp: <2002-01-03 17:24:51 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 146,150 **** swap_out_fragments(void) { struct list_head * fragment_lh, * next_fragment; ! int maxscan, not_found; comp_cache_fragment_t * fragment, * aux_fragment; comp_cache_t * comp_page = NULL; --- 146,150 ---- swap_out_fragments(void) { struct list_head * fragment_lh, * next_fragment; ! int maxscan; comp_cache_fragment_t * fragment, * aux_fragment; comp_cache_t * comp_page = NULL; *************** *** 157,162 **** next_fragment = &lru_queue; ! while (!list_empty(&lru_queue) && maxscan--) { ! fragment = list_entry(lru_queue.next, comp_cache_fragment_t, lru_queue); entry.val = fragment->index; --- 157,162 ---- next_fragment = &lru_queue; ! while (!list_empty(&lru_queue) && maxscan--) { ! 
fragment = list_entry(fragment_lh = lru_queue.next, comp_cache_fragment_t, lru_queue); entry.val = fragment->index; *************** *** 172,175 **** --- 172,176 ---- comp_cache_free(fragment); page_cache_release(page); + maxscan++; continue; } *************** *** 186,209 **** swap_duplicate(entry); ! lock_page(page); ! ! /* the fragment might have been freed while we slept ! * for the lock above */ ! not_found = 1; ! for_each_fragment(fragment_lh, comp_page) ! if (list_entry(fragment_lh, comp_cache_fragment_t, list) == fragment) ! not_found = 0; ! ! /* if the fragment have been freed, forget it (even it ! * has been reallocated to the same comp page, the ! * fragment->index will be different because we hold a ! * reference on the swp_entry */ ! if (not_found || fragment->index != entry.val) { ! next_fragment = &lru_queue; ! maxscan = 10; ! UnlockPage(page); goto freed; } ! swp_buffer = find_free_swp_buffer(); buffer_page = swp_buffer->comp_page->page; --- 187,198 ---- swap_duplicate(entry); ! /* page locked? move it to the back of the list */ ! if (TryLockPage(page)) { ! list_del(fragment_lh); ! list_add_tail(fragment_lh, &lru_queue); ! maxscan++; goto freed; } ! swp_buffer = find_free_swp_buffer(); buffer_page = swp_buffer->comp_page->page; *************** *** 213,217 **** /* race: this is not supposed to happen unless we ! * sleep to lock the page in find_free_swp_buffer */ if (CompFragmentFreed(fragment)) { free_swp_buffer(swp_buffer); --- 202,206 ---- /* race: this is not supposed to happen unless we ! * sleep to lock the page in find_free_swp_buffer() */ if (CompFragmentFreed(fragment)) { free_swp_buffer(swp_buffer); |
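The "never sleep for a page lock" policy in this batch replaces lock_page() with TryLockPage() plus a requeue, as the swapout.c hunk above shows. A toy model of that loop, where a plain flag stands in for the page lock and an index rotation stands in for the LRU list moves; the real code requeues the fragment with list_add and compensates with maxscan++:

#include <stdio.h>

struct fragment { int locked; int id; };

/* Like TryLockPage(): returns nonzero when the lock is already held. */
static int try_lock(struct fragment *f)
{
    if (f->locked)
        return 1;
    f->locked = 1;
    return 0;
}

int main(void)
{
    struct fragment lru[3] = { { 1, 0 }, { 0, 1 }, { 0, 2 } };
    int maxscan = 3;

    for (int i = 0; maxscan > 0; i = (i + 1) % 3, maxscan--) {
        if (try_lock(&lru[i])) {
            /* busy: requeue and move on instead of sleeping */
            printf("fragment %d busy, requeued\n", lru[i].id);
            continue;
        }
        printf("fragment %d locked for swap-out\n", lru[i].id);
        lru[i].locked = 0;   /* UnlockPage() equivalent */
    }
    return 0;
}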
From: Rodrigo S. de C. <rc...@us...> - 2002-01-02 16:59:10
|
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv21198/mm
Modified Files: page_alloc.c vmscan.c
Log Message:
- shrink_comp_cache() now checks for empty comp cache entries and frees them when the comp cache needs to shrink. This check happens even if the comp page passed as a parameter cannot itself be freed for the shrink.
- grow_comp_cache() now grows the comp cache by many pages at once. When the cache needs to grow, it will steal up to half of the pages freed by try_to_free_pages(). Only pages from the normal zone are taken into account, since those are the kind of pages we allocate.
- added myself to the MAINTAINERS file and wrote help text for the compressed cache config option in Documentation/Configure.help.
Index: page_alloc.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/page_alloc.c,v retrieving revision 1.13 retrieving revision 1.14 diff -C2 -r1.13 -r1.14 *** page_alloc.c 2001/12/14 15:27:01 1.13 --- page_alloc.c 2002/01/02 16:59:05 1.14 *************** *** 88,92 **** BUG(); page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty) | (1<<PG_comp_swap_cache)); ! if (current->flags & PF_FREE_PAGES) goto local_freelist; --- 88,92 ---- BUG(); page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty) | (1<<PG_comp_swap_cache)); ! if (current->flags & PF_FREE_PAGES) goto local_freelist; Index: vmscan.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v retrieving revision 1.23 retrieving revision 1.24 diff -C2 -r1.23 -r1.24 *** vmscan.c 2001/12/28 21:45:24 1.23 --- vmscan.c 2002/01/02 16:59:05 1.24 *************** *** 519,528 **** UnlockPage(page); - /* steal the page if we need to grow the comp cache */ - if (grow_comp_cache(page)) { - max_scan++; - continue; - } - /* effectively free the page here */ page_cache_release(page); --- 519,522 ---- *************** *** 604,609 **** do { nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages); ! if (nr_pages <= 0) return 1; } while (--priority); --- 598,608 ---- do { nr_pages = shrink_caches(classzone, priority, gfp_mask, nr_pages); ! if (nr_pages <= 0) { ! /* let's steal at most half the pages that has ! * been freed by shrink_caches to grow ! * compressed cache (only for normal zone) */ ! grow_comp_cache(classzone, SWAP_CLUSTER_MAX/2); return 1; + } } while (--priority); |
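The vmscan.c hunk above moves the growth decision out of the per-page reclaim loop: the compressed cache is only grown once shrink_caches() has met its target. A toy model of that control flow, assuming SWAP_CLUSTER_MAX's 2.4-era value of 32 and with made-up numbers for how much each pass frees:

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32

/* Pretend higher memory pressure (lower priority) frees more pages. */
static int shrink_caches(int priority, int nr_pages)
{
    return nr_pages - (8 - priority) * 8;
}

static void grow_comp_cache(int nr_pages)
{
    printf("growing compressed cache by up to %d pages\n", nr_pages);
}

int main(void)
{
    int priority = 6;
    int nr_pages = SWAP_CLUSTER_MAX;

    do {
        nr_pages = shrink_caches(priority, nr_pages);
        if (nr_pages <= 0) {
            /* target met: only now divert half the freed pages */
            grow_comp_cache(SWAP_CLUSTER_MAX / 2);
            return 0;
        }
    } while (--priority);

    return 1;   /* could not free enough pages */
}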