[lc-checkins] CVS: linux/mm/comp_cache adaptivity.c,1.36,1.37 aux.c,1.40,1.41 free.c,1.42,1.43 main.
From: Rodrigo S. de C. <rc...@us...> - 2002-07-18 21:31:11
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv20667/mm/comp_cache
Modified Files:
adaptivity.c aux.c free.c main.c swapin.c vswap.c
Log Message:
Feature
o Make resizing (manual, not on demand) work with a preemptible
kernel. This is a first and very crude implementation. So far, swap
cache support and manual resizing work in the tests that have been
run (see the locking-pattern sketch after this log message).
Cleanups
o Clean up the virtual_swap_free() function (now split into
__virtual_swap_free() plus a locking wrapper).
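The key pattern in this change is to avoid holding a spinlock across
anything that may sleep. In the resizing paths, virtual_swap_list (and
comp_cache_lock in the other files) is released around vmalloc() and
around the pte walk in set_pte_list_to_entry(), then reacquired and the
state re-checked. The sketch below is not code from the patch; it uses
a hypothetical grow_table()/table_lock pair purely to illustrate that
pattern on a 2.4 preemptible kernel:

#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static spinlock_t table_lock = SPIN_LOCK_UNLOCKED;
static void ** table;
static unsigned long table_entries;

static int
grow_table(unsigned long new_entries)
{
	void ** new_table, ** old_table = NULL;

	spin_lock(&table_lock);
	if (new_entries <= table_entries)
		goto out_unlock;

	/* vmalloc() may sleep: never call it with the spinlock held */
	spin_unlock(&table_lock);
	new_table = (void **) vmalloc(new_entries * sizeof(void *));
	spin_lock(&table_lock);

	if (!new_table)
		goto out_unlock;

	/* re-check: someone else may have resized while we slept */
	if (new_entries <= table_entries) {
		old_table = new_table;
		goto out_unlock;
	}

	if (table)
		memcpy(new_table, table, table_entries * sizeof(void *));
	old_table = table;
	table = new_table;
	table_entries = new_entries;
out_unlock:
	spin_unlock(&table_lock);
	if (old_table)
		vfree(old_table);	/* freed outside the lock as well */
	return 0;
}

This is why shrink_vswap() and grow_vswap() in adaptivity.c now take
vswap_resize_semaphore first, only then spin_lock(&virtual_swap_list),
and release the spinlock across vmalloc() and set_pte_list_to_entry().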
Index: adaptivity.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/adaptivity.c,v
retrieving revision 1.36
retrieving revision 1.37
diff -C2 -r1.36 -r1.37
*** adaptivity.c 16 Jul 2002 18:41:55 -0000 1.36
--- adaptivity.c 18 Jul 2002 21:31:08 -0000 1.37
***************
*** 2,6 ****
* linux/mm/comp_cache/adaptivity.c
*
! * Time-stamp: <2002-07-16 14:03:17 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/adaptivity.c
*
! * Time-stamp: <2002-07-18 15:44:59 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 116,135 ****
struct comp_cache_fragment * fragment;
struct vswap_address ** new_vswap_address;
- unsigned int failed_alloc = 0;
unsigned long index, new_index, vswap_new_num_entries = NUM_VSWAP_ENTRIES;
swp_entry_t old_entry, entry;
if (!vswap_address)
return;
! if (vswap_current_num_entries <= 1.10 * NUM_VSWAP_ENTRIES)
return;
/* more used entries than the new size? can't shrink */
if (vswap_num_used_entries >= NUM_VSWAP_ENTRIES)
! return;
!
! if (down_trylock(&vswap_resize_semaphore))
! return;
#if 0
--- 116,137 ----
struct comp_cache_fragment * fragment;
struct vswap_address ** new_vswap_address;
unsigned long index, new_index, vswap_new_num_entries = NUM_VSWAP_ENTRIES;
swp_entry_t old_entry, entry;
+ int failed_alloc = 0, ret;
if (!vswap_address)
return;
! if (down_trylock(&vswap_resize_semaphore))
return;
+ spin_lock(&virtual_swap_list);
+
+ if (vswap_current_num_entries <= 1.10 * NUM_VSWAP_ENTRIES)
+ goto out_unlock;
+
/* more used entries than the new size? can't shrink */
if (vswap_num_used_entries >= NUM_VSWAP_ENTRIES)
! goto out_unlock;
#if 0
***************
*** 214,218 ****
/* let's fix the ptes */
! if (!set_pte_list_to_entry(vswap_address[index]->pte_list, old_entry, entry))
goto backout;
--- 216,224 ----
/* let's fix the ptes */
! spin_unlock(&virtual_swap_list);
! ret = set_pte_list_to_entry(vswap_address[index]->pte_list, old_entry, entry);
! spin_lock(&virtual_swap_list);
!
! if (!ret)
goto backout;
***************
*** 271,282 ****
if (vswap_last_used >= vswap_new_num_entries)
! goto out;
allocate_new_vswap:
new_vswap_address = (struct vswap_address **) vmalloc(vswap_new_num_entries * sizeof(struct vswap_address*));
if (!new_vswap_address) {
vswap_failed_alloc = 1;
! goto out;
}
--- 277,290 ----
if (vswap_last_used >= vswap_new_num_entries)
! goto out_unlock;
allocate_new_vswap:
+ spin_unlock(&virtual_swap_list);
new_vswap_address = (struct vswap_address **) vmalloc(vswap_new_num_entries * sizeof(struct vswap_address*));
+ spin_lock(&virtual_swap_list);
if (!new_vswap_address) {
vswap_failed_alloc = 1;
! goto out_unlock;
}
***************
*** 326,331 ****
vswap_last_used = vswap_new_num_entries - 1;
vswap_failed_alloc = 0;
! out:
! up(&vswap_resize_semaphore);
}
--- 334,340 ----
vswap_last_used = vswap_new_num_entries - 1;
vswap_failed_alloc = 0;
! out_unlock:
! spin_unlock(&virtual_swap_list);
! up(&vswap_resize_semaphore);
}
***************
*** 348,359 ****
return;
/* using vswap_last_used instead of vswap_current_num_entries
* forces us to grow the cache even if we started shrinking
* it, but then the comp cache was set back to the original size */
if (vswap_last_used >= 0.90 * (NUM_VSWAP_ENTRIES - 1))
! return;
!
! if (down_trylock(&vswap_resize_semaphore))
! return;
#if 0
--- 357,370 ----
return;
+ if (down_trylock(&vswap_resize_semaphore))
+ return;
+
+ spin_lock(&virtual_swap_list);
+
/* using vswap_last_used instead of vswap_current_num_entries
* forces us to grow the cache even if we started shrinking
* it, but then the comp cache was set back to the original size */
if (vswap_last_used >= 0.90 * (NUM_VSWAP_ENTRIES - 1))
! goto out_unlock;
#if 0
***************
*** 366,375 ****
if (vswap_current_num_entries == vswap_new_num_entries)
goto fix_old_vswap;
-
- new_vswap_address = (struct vswap_address **) vmalloc(vswap_new_num_entries * sizeof(struct vswap_address*));
if (!new_vswap_address) {
vswap_failed_alloc = 1;
! goto out;
}
--- 377,388 ----
if (vswap_current_num_entries == vswap_new_num_entries)
goto fix_old_vswap;
+ spin_unlock(&virtual_swap_list);
+ new_vswap_address = (struct vswap_address **) vmalloc(vswap_new_num_entries * sizeof(struct vswap_address*));
+ spin_lock(&virtual_swap_list);
+
if (!new_vswap_address) {
vswap_failed_alloc = 1;
! goto out_unlock;
}
***************
*** 415,419 ****
vswap_last_used = vswap_new_num_entries - 1;
vswap_failed_alloc = 0;
! goto out;
fix_old_vswap:
--- 428,432 ----
vswap_last_used = vswap_new_num_entries - 1;
vswap_failed_alloc = 0;
! goto out_unlock;
fix_old_vswap:
***************
*** 434,438 ****
last_vswap_allocated = vswap_new_num_entries - 1;
vswap_last_used = vswap_current_num_entries - 1;
! out:
up(&vswap_resize_semaphore);
}
--- 447,452 ----
last_vswap_allocated = vswap_new_num_entries - 1;
vswap_last_used = vswap_current_num_entries - 1;
! out_unlock:
! spin_unlock(&virtual_swap_list);
up(&vswap_resize_semaphore);
}
***************
*** 484,487 ****
--- 498,503 ----
* check the comp_page and free it if possible, we don't want to
* perform an aggressive shrinkage.
+ *
+ * caller must hold comp_cache_lock lock
*/
int
***************
*** 491,494 ****
--- 507,512 ----
int retval = 0;
+ spin_lock(&comp_cache_lock);
+
if (!comp_page->page)
BUG();
***************
*** 538,542 ****
shrink_fragment_hash_table();
shrink_vswap();
!
return retval;
--- 556,561 ----
shrink_fragment_hash_table();
shrink_vswap();
! out_unlock:
! spin_unlock(&comp_cache_lock);
return retval;
***************
*** 546,550 ****
if (!empty_comp_page || !empty_comp_page->page)
! return retval;
lock_page(empty_comp_page->page);
--- 565,569 ----
if (!empty_comp_page || !empty_comp_page->page)
! goto out_unlock;
lock_page(empty_comp_page->page);
***************
*** 553,557 ****
if (!list_empty(&(comp_page->fragments))) {
UnlockPage(empty_comp_page->page);
! return retval;
}
--- 572,576 ----
if (!list_empty(&(comp_page->fragments))) {
UnlockPage(empty_comp_page->page);
! goto out_unlock;
}
***************
*** 616,620 ****
--- 635,642 ----
struct comp_cache_page * comp_page;
struct page * page;
+ int ret = 0;
+ spin_lock(&comp_cache_lock);
+
while (comp_cache_needs_to_grow() && nrpages--) {
page = alloc_pages(GFP_ATOMIC, comp_page_order);
***************
*** 622,630 ****
/* couldn't allocate the page */
if (!page)
! return 0;
if (!init_comp_page(&comp_page, page)) {
__free_pages(page, comp_page_order);
! return 0;
}
--- 644,652 ----
/* couldn't allocate the page */
if (!page)
! goto out_unlock;
if (!init_comp_page(&comp_page, page)) {
__free_pages(page, comp_page_order);
! goto out_unlock;
}
***************
*** 637,653 ****
}
if (!comp_cache_needs_to_grow()) {
grow_zone_watermarks();
! goto out;
}
if (!fragment_failed_alloc && !vswap_failed_alloc)
! return 1;
! out:
grow_fragment_hash_table();
grow_vswap();
!
! return 1;
}
--- 659,678 ----
}
+ ret = 1;
+
if (!comp_cache_needs_to_grow()) {
grow_zone_watermarks();
! goto grow_structures;
}
if (!fragment_failed_alloc && !vswap_failed_alloc)
! goto out_unlock;
! grow_structures:
grow_fragment_hash_table();
grow_vswap();
! out_unlock:
! spin_unlock(&comp_cache_lock);
! return ret;
}
Index: aux.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v
retrieving revision 1.40
retrieving revision 1.41
diff -C2 -r1.40 -r1.41
*** aux.c 17 Jul 2002 20:44:36 -0000 1.40
--- aux.c 18 Jul 2002 21:31:08 -0000 1.41
***************
*** 2,6 ****
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-07-17 16:06:21 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-07-18 14:14:42 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 127,130 ****
--- 127,131 ----
}
+ /* unlikely, but the list can still be corrupted since it is not protected by any lock */
int
set_pte_list_to_entry(struct pte_list * start_pte_list, swp_entry_t old_entry, swp_entry_t entry)
Index: free.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v
retrieving revision 1.42
retrieving revision 1.43
diff -C2 -r1.42 -r1.43
*** free.c 18 Jul 2002 13:32:50 -0000 1.42
--- free.c 18 Jul 2002 21:31:08 -0000 1.43
***************
*** 2,6 ****
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2002-07-18 10:04:09 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2002-07-18 16:20:01 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 139,142 ****
--- 139,143 ----
}
+ /* caller must hold comp_cache_lock lock */
void
comp_cache_free_locked(struct comp_cache_fragment * fragment)
***************
*** 218,222 ****
}
! inline void
comp_cache_free(struct comp_cache_fragment * fragment) {
struct comp_cache_page * comp_page;
--- 219,224 ----
}
! /* caller must hold comp_cache_lock lock */
! void
comp_cache_free(struct comp_cache_fragment * fragment) {
struct comp_cache_page * comp_page;
***************
*** 322,326 ****
/* let's proceed to fix swap counter for either entries */
for(; num_freed_ptes > 0; --num_freed_ptes) {
! virtual_swap_free(vswap->offset);
swap_duplicate(entry);
}
--- 324,328 ----
/* let's proceed to fix swap counter for either entries */
for(; num_freed_ptes > 0; --num_freed_ptes) {
! __virtual_swap_free(vswap->offset);
swap_duplicate(entry);
}
***************
*** 338,342 ****
__delete_from_swap_cache(swap_cache_page);
spin_unlock(&pagecache_lock);
! virtual_swap_free(vswap->offset);
add_to_swap_cache(swap_cache_page, entry);
--- 340,344 ----
__delete_from_swap_cache(swap_cache_page);
spin_unlock(&pagecache_lock);
! __virtual_swap_free(vswap->offset);
add_to_swap_cache(swap_cache_page, entry);
Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.58
retrieving revision 1.59
diff -C2 -r1.58 -r1.59
*** main.c 17 Jul 2002 21:45:12 -0000 1.58
--- main.c 18 Jul 2002 21:31:08 -0000 1.59
***************
*** 2,6 ****
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-07-17 18:28:09 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-07-18 13:19:51 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 127,131 ****
if (!dirty)
BUG();
! invalidate_comp_cache(page->mapping, page->index);
}
--- 127,131 ----
if (!dirty)
BUG();
! __invalidate_comp_cache(page->mapping, page->index);
}
Index: swapin.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v
retrieving revision 1.46
retrieving revision 1.47
diff -C2 -r1.46 -r1.47
*** swapin.c 17 Jul 2002 20:44:36 -0000 1.46
--- swapin.c 18 Jul 2002 21:31:08 -0000 1.47
***************
*** 2,6 ****
* linux/mm/comp_cache/swapin.c
*
! * Time-stamp: <2002-07-17 13:58:48 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/swapin.c
*
! * Time-stamp: <2002-07-18 17:59:01 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 19,30 ****
int
! invalidate_comp_cache(struct address_space *mapping, unsigned long offset)
{
struct comp_cache_fragment * fragment;
! int err = find_comp_page(mapping, offset, &fragment);
if (!err)
comp_cache_free(fragment);
return err;
}
--- 19,40 ----
int
! __invalidate_comp_cache(struct address_space * mapping, unsigned long offset)
{
struct comp_cache_fragment * fragment;
! int err = find_comp_page(mapping, offset, &fragment);
if (!err)
comp_cache_free(fragment);
+ return err;
+ }
+ int
+ invalidate_comp_cache(struct address_space * mapping, unsigned long offset)
+ {
+ int err;
+
+ spin_lock(&comp_cache_lock);
+ err = __invalidate_comp_cache(mapping, offset);
+ spin_unlock(&comp_cache_lock);
return err;
}
***************
*** 38,43 ****
int err = -ENOENT;
if (likely(!PageTestandClearCompCache(page)))
! goto out;
/* we may have a null page->mapping if the page has been
--- 48,55 ----
int err = -ENOENT;
+ spin_lock(&comp_cache_lock);
+
if (likely(!PageTestandClearCompCache(page)))
! goto out_unlock;
/* we may have a null page->mapping if the page has been
***************
*** 47,51 ****
if (err)
! goto out;
if (CompFragmentTestandClearDirty(fragment)) {
--- 59,63 ----
if (err)
! goto out_unlock;
if (CompFragmentTestandClearDirty(fragment)) {
***************
*** 58,62 ****
}
comp_cache_free(fragment);
! out:
return err;
}
--- 70,76 ----
}
comp_cache_free(fragment);
!
! out_unlock:
! spin_unlock(&comp_cache_lock);
return err;
}
***************
*** 139,142 ****
--- 153,158 ----
struct list_head * fragment_lh, * tmp_lh;
struct comp_cache_fragment * fragment;
+
+ spin_lock(&comp_cache_lock);
list_for_each_safe(fragment_lh, tmp_lh, list) {
***************
*** 146,149 ****
--- 162,167 ----
comp_cache_free(fragment);
}
+
+ spin_unlock(&comp_cache_lock);
}
***************
*** 183,192 ****
struct comp_cache_fragment * fragment;
if (list_empty(&mapping->dirty_comp_pages))
! return;
page = page_cache_alloc(mapping);
if (!page)
! return;
if (list_empty(&mapping->dirty_comp_pages))
--- 201,212 ----
struct comp_cache_fragment * fragment;
+ spin_lock(&comp_cache_lock);
+
if (list_empty(&mapping->dirty_comp_pages))
! goto out_unlock;
page = page_cache_alloc(mapping);
if (!page)
! goto out_unlock;
if (list_empty(&mapping->dirty_comp_pages))
***************
*** 221,224 ****
--- 241,246 ----
out_release:
page_cache_release(page);
+ out_unlock:
+ spin_unlock(&comp_cache_lock);
}
Index: vswap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v
retrieving revision 1.43
retrieving revision 1.44
diff -C2 -r1.43 -r1.44
*** vswap.c 18 Jul 2002 13:32:51 -0000 1.43
--- vswap.c 18 Jul 2002 21:31:08 -0000 1.44
***************
*** 2,6 ****
* linux/mm/comp_cache/vswap.c
*
! * Time-stamp: <2002-07-18 10:01:50 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/vswap.c
*
! * Time-stamp: <2002-07-18 17:59:42 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 288,292 ****
*/
int
! virtual_swap_free(unsigned long offset)
{
unsigned int swap_count;
--- 288,292 ----
*/
int
! __virtual_swap_free(unsigned long offset)
{
unsigned int swap_count;
***************
*** 323,326 ****
--- 323,327 ----
vswap->pte_list = NULL;
vswap->swap_cache_page = NULL;
+ vswap->fragment = NULL;
/* if this entry is reserved, it's not on any list (either
***************
*** 330,338 ****
if (fragment == VSWAP_RESERVED) {
vswap_num_reserved_entries--;
! vswap->fragment = NULL;
! list_add(&(vswap->list), &vswap_address_free_head);
! nr_free_vswap++;
!
! return 0;
}
--- 331,335 ----
if (fragment == VSWAP_RESERVED) {
vswap_num_reserved_entries--;
! goto out;
}
***************
*** 341,359 ****
/* remove from used list */
! list_del_init(&(vswap_address[offset]->list));
nr_used_vswap--;
- vswap->fragment = NULL;
/* add to the free list */
list_add(&(vswap->list), &vswap_address_free_head);
nr_free_vswap++;
! /* global freeable space */
! comp_cache_freeable_space += fragment->compressed_size;
! /* whops, it will DEADLOCK when shrinking the vswap table
! * since we hold virtual_swap_list */
comp_cache_free(fragment);
! return 0;
}
--- 338,380 ----
/* remove from used list */
! list_del(&(vswap_address[offset]->list));
nr_used_vswap--;
+ /* global freeable space */
+ comp_cache_freeable_space += fragment->compressed_size;
+ out:
/* add to the free list */
list_add(&(vswap->list), &vswap_address_free_head);
nr_free_vswap++;
+ return 0;
+ }
! /* caller must hold the virtual_swap_list lock;
! * returns with virtual_swap_list unlocked */
! int
! virtual_swap_free(unsigned long offset)
! {
! struct comp_cache_fragment * fragment;
! int ret;
!
! fragment = vswap_address[offset]->fragment;
! ret = __virtual_swap_free(offset);
!
! if (ret)
! goto out_unlock;
!
! if (fragment == VSWAP_RESERVED)
! goto out_unlock;
! spin_unlock(&virtual_swap_list);
!
! spin_lock(&comp_cache_lock);
comp_cache_free(fragment);
! spin_unlock(&comp_cache_lock);
! out:
! return ret;
! out_unlock:
! spin_unlock(&virtual_swap_list);
! goto out;
}
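A note on the calling convention visible in the vswap.c hunk above (an
assumption drawn from the added comments and the double-underscore
naming, not code from the patch): __virtual_swap_free() expects
virtual_swap_list to be held and leaves it held, while the
virtual_swap_free() wrapper also expects it held on entry but returns
with it released, since it has to drop it before taking comp_cache_lock
for comp_cache_free(), which is the deadlock the old comment warned
about. Roughly:

static void
example_callers(unsigned long offset)
{
	/* caller that only needs the vswap bookkeeping: the lock
	 * stays held across __virtual_swap_free() */
	spin_lock(&virtual_swap_list);
	__virtual_swap_free(offset);
	/* ... more work under the lock ... */
	spin_unlock(&virtual_swap_list);

	/* caller that wants the full path, including comp_cache_free():
	 * virtual_swap_free() drops virtual_swap_list itself before
	 * taking comp_cache_lock, so it must not be unlocked again here */
	spin_lock(&virtual_swap_list);
	virtual_swap_free(offset);
}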