[lc-checkins] CVS: linux/mm/comp_cache swapout.c,1.51,1.52 vswap.c,1.34,1.35
Status: Beta
Brought to you by:
nitin_sf
From: Rodrigo S. de C. <rc...@us...> - 2002-06-19 18:10:25
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv28892/mm/comp_cache Modified Files: swapout.c vswap.c Log Message: Bug fixes o Fixed potential bug in get_comp_cache_page() when adding a fragment to an empty comp page o Fixed a bug that wouldn't allocate some vswap entries if they couldn't be allocated for the first time (in comp_cache_vswap_alloc()). It means that if we allocated only one third of vswap entries, it would oom kill some process, but wouldn't try to allocate the rest of vswap entries later. This bug usually didn't happen before since vswap data structures were allocated at the boot time. Other o Now we don't try to refill swap buffer with many pages in get_comp_cache_page(), but only one. That was done due to the scenario where we could refill with few pages, but not the amount we previously set (SWAP_CLUSTER_MAX >> 2), so we end up writing out fragments. Index: swapout.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v retrieving revision 1.51 retrieving revision 1.52 diff -C2 -r1.51 -r1.52 *** swapout.c 19 Jun 2002 12:18:44 -0000 1.51 --- swapout.c 19 Jun 2002 18:10:20 -0000 1.52 *************** *** 2,6 **** * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-06-19 08:47:28 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * /mm/comp_cache/swapout.c * ! * Time-stamp: <2002-06-19 11:34:35 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 448,452 **** UnlockPage(page); ! if (!refill_swp_buffer(gfp_mask, SWAP_CLUSTER_MAX >> 2, priority)) writeout_fragments(gfp_mask, priority--); --- 448,452 ---- UnlockPage(page); ! 
if (!refill_swp_buffer(gfp_mask, 1, priority)) writeout_fragments(gfp_mask, priority--); *************** *** 542,545 **** --- 542,550 ---- /* add the fragment to the comp_page list of fragments */ + if (list_empty(&(comp_page->fragments))) { + list_add(&(fragment->list), &(comp_page->fragments)); + goto out; + } + previous_fragment = list_entry(comp_page->fragments.prev, struct comp_cache_fragment, list); Index: vswap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v retrieving revision 1.34 retrieving revision 1.35 diff -C2 -r1.34 -r1.35 *** vswap.c 19 Jun 2002 12:18:44 -0000 1.34 --- vswap.c 19 Jun 2002 18:10:20 -0000 1.35 *************** *** 2,6 **** * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-06-19 08:47:38 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2002-06-19 14:48:47 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 24,29 **** struct vswap_address ** vswap_address = NULL; - struct list_head vswap_address_free_head; struct list_head vswap_address_used_head; static struct pte_list * pte_list_freelist = NULL; --- 24,29 ---- struct vswap_address ** vswap_address = NULL; struct list_head vswap_address_used_head; + struct list_head vswap_address_free_head; static struct pte_list * pte_list_freelist = NULL; *************** *** 51,54 **** --- 51,58 ---- unsigned int vswap_last_used; + /* last vswap which has been allocated (this index will be used to try + * to allocate in case the vswap addresses are over) */ + unsigned int last_vswap_allocated; + unsigned short * last_page_size; unsigned short last_page = 0; *************** *** 71,76 **** vswap_num_swap_cache = 0; ! for (i = 0; i < NUM_VSWAP_ENTRIES; i++) ! vswap_alloc_and_init(vswap_address, i); return 1; --- 75,80 ---- vswap_num_swap_cache = 0; ! for (i = 0; i < NUM_VSWAP_ENTRIES && vswap_alloc_and_init(vswap_address, i); i++); ! 
last_vswap_allocated = i - 1; return 1; *************** *** 106,116 **** return 0; ! /* no more free vswap address or too many used entries for the ! * current compressed cache size? so no available space */ ! if (list_empty(&vswap_address_free_head) || vswap_num_used_entries >= NUM_VSWAP_ENTRIES) ! return 0; available_mean_size = (unsigned short) (comp_cache_freeable_space/num_comp_pages); ! if (available_mean_size > PAGE_SIZE) BUG(); --- 110,134 ---- return 0; ! /* no more free vswap address? */ ! if (list_empty(&vswap_address_free_head)) { ! /* have all vswap addresses already been allocated? */ ! if (last_vswap_allocated == NUM_VSWAP_ENTRIES - 1) ! return 0; ! ! /* allocate an index that has failed to allocate */ ! if (!vswap_alloc_and_init(vswap_address, last_vswap_allocated + 1)) ! return 0; ! ! last_vswap_allocated++; ! return 1; ! } + /* or too many used entries for the current compressed cache + * size? so no available space */ + if (vswap_num_used_entries >= NUM_VSWAP_ENTRIES) + return 0; + available_mean_size = (unsigned short) (comp_cache_freeable_space/num_comp_pages); ! if (available_mean_size > PAGE_SIZE) BUG(); *************** *** 662,671 **** * */ ! void vswap_alloc_and_init(struct vswap_address ** vswap_address, unsigned long offset) { vswap_address[offset] = alloc_vswap(); if (!vswap_address[offset]) ! return; vswap_address[offset]->offset = offset; --- 680,689 ---- * */ ! int vswap_alloc_and_init(struct vswap_address ** vswap_address, unsigned long offset) { vswap_address[offset] = alloc_vswap(); if (!vswap_address[offset]) ! return 0; vswap_address[offset]->offset = offset; *************** *** 676,679 **** --- 694,698 ---- list_add(&(vswap_address[offset]->list), &vswap_address_free_head); + return 1; } |