[lc-checkins] CVS: linux/mm/comp_cache aux.c,1.19,1.20 free.c,1.23,1.24 main.c,1.25,1.26 swapin.c,1.
From: Rodrigo S. de C. <rc...@us...> - 2002-02-25 19:34:44
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv26754/mm/comp_cache
Modified Files:
aux.c free.c main.c swapin.c swapout.c vswap.c
Log Message:
This version fixes some bugs in the clean pages support, fixes many
other bugs in the page cache support, and includes code cleanups. The
main improvement in this version is the replacement of the slab cache
with Rik van Riel's rmap allocation for pte lists.
- each address space now has two lists for compressed pages: a dirty
list and a clean list (see the first sketch after this list).
- some static inline functions that were in comp_cache.h have been
moved to the .c files that use them.
- vswap functions that are called often are now compiled with the
FASTCALL option.
- all vswap functions have been documented and cleaned up.
- since our pte_list struct is smaller than the minimum size of a slab
cache allocation, the slab cache has been replaced by Rik's allocator,
which is pretty simple (see the second sketch after this list).
- failed fragment allocations are now handled.
- the shared memory support has been removed, since it was broken and
only getting in our way.
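
For the dirty/clean split, a fragment sits on its mapping's
dirty_comp_pages list while it holds data that has not been written
back, and is moved to clean_comp_pages once that data is written (see
decompress_fragment and get_comp_cache_page in the hunks below). A
minimal sketch of that move, using the list and flag helpers that
appear in the diff; the wrapper name mark_fragment_clean is
hypothetical and not part of the commit:

	/* Sketch only: move a fragment from its mapping's dirty list to
	 * the clean list once its data has been written back.  Mirrors
	 * the logic in decompress_fragment(). */
	static void mark_fragment_clean(comp_cache_fragment_t * fragment)
	{
		if (!CompFragmentTestandClearDirty(fragment))
			return;	/* already clean, nothing to move */
		list_del(&fragment->mapping_list);
		list_add(&fragment->mapping_list,
			 &fragment->mapping->clean_comp_pages);
	}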
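
The pte_list allocator that replaces the slab cache (committed in the
vswap.c hunk near the end) carves a free page into pte_list structs
and chains them on a static freelist; allocation then just pops the
head. A condensed sketch with the field names from the diff; failure
handling is simplified here (the committed code panics when no page
can be allocated):

	/* Sketch of the page-backed freelist used instead of a slab cache. */
	struct pte_list {
		struct pte_list * next;
		pte_t * ptep;
	};

	static struct pte_list * pte_list_freelist = NULL;

	/* Carve a zeroed page into pte_list structs and push them all
	 * onto the freelist. */
	static void alloc_new_pte_lists(void)
	{
		struct pte_list * pte_list = (void *) get_zeroed_page(GFP_ATOMIC);
		int i = PAGE_SIZE / sizeof(struct pte_list);

		if (!pte_list)
			return;
		for (; i-- > 0; pte_list++) {
			pte_list->next = pte_list_freelist;
			pte_list_freelist = pte_list;
		}
	}

	/* Pop one pte_list struct off the freelist, refilling it first
	 * if it is empty. */
	static struct pte_list * pte_list_alloc(void)
	{
		struct pte_list * pte_list;

		if (!pte_list_freelist)
			alloc_new_pte_lists();
		if (!pte_list_freelist)
			return NULL;	/* page allocation failed */

		pte_list = pte_list_freelist;
		pte_list_freelist = pte_list->next;
		pte_list->next = NULL;
		return pte_list;
	}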
Index: aux.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v
retrieving revision 1.19
retrieving revision 1.20
diff -C2 -r1.19 -r1.20
*** aux.c 23 Feb 2002 19:19:27 -0000 1.19
--- aux.c 25 Feb 2002 19:34:41 -0000 1.20
***************
*** 2,6 ****
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-02-23 16:13:24 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2002-02-25 09:36:23 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 200,204 ****
*fragment = NULL;
! if (list_empty(&mapping->comp_pages))
goto not_found;
--- 200,204 ----
*fragment = NULL;
! if (list_empty(&mapping->clean_comp_pages) && list_empty(&mapping->dirty_comp_pages))
goto not_found;
Index: free.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v
retrieving revision 1.23
retrieving revision 1.24
diff -C2 -r1.23 -r1.24
*** free.c 23 Feb 2002 18:24:11 -0000 1.23
--- free.c 25 Feb 2002 19:34:41 -0000 1.24
***************
*** 2,6 ****
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2002-02-23 12:03:56 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2002-02-25 09:04:00 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 186,190 ****
unsigned long address;
struct vm_area_struct * vma;
! pte_t pte;
struct vswap_address * vswap;
struct list_head * vswap_lh;
--- 186,190 ----
unsigned long address;
struct vm_area_struct * vma;
! pte_t pte, * ptep;
struct vswap_address * vswap;
struct list_head * vswap_lh;
***************
*** 248,252 ****
BUG();
! comp_cache_swp_free_generic(old_entry, 0);
page_cache_release(swap_cache_page);
UnlockPage(swap_cache_page);
--- 248,252 ----
BUG();
! comp_cache_swp_free(old_entry);
page_cache_release(swap_cache_page);
UnlockPage(swap_cache_page);
***************
*** 259,264 ****
next_pte_list = pte_list->next;
! mm = ptep_to_mm(pte_list->ptep);
! address = ptep_to_address(pte_list->ptep);
vma = find_vma(mm, address);
--- 259,266 ----
next_pte_list = pte_list->next;
! ptep = pte_list->ptep;
!
! mm = ptep_to_mm(ptep);
! address = ptep_to_address(ptep);
vma = find_vma(mm, address);
***************
*** 268,281 ****
if (!vma)
goto next;
-
- remove_pte_vswap(pte_list->ptep);
! pte = ptep_get_and_clear(pte_list->ptep);
flush_tlb_page(vma, address);
flush_cache_page(vma, address);
! set_pte(pte_list->ptep, swp_entry_to_pte(entry));
!
! comp_cache_swp_free_generic(old_entry, 0);
next:
--- 270,283 ----
if (!vma)
goto next;
! remove_pte_vswap(ptep);
!
! pte = ptep_get_and_clear(ptep);
flush_tlb_page(vma, address);
flush_cache_page(vma, address);
! set_pte(ptep, swp_entry_to_pte(entry));
!
! comp_cache_swp_free(old_entry);
next:
Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.25
retrieving revision 1.26
diff -C2 -r1.25 -r1.26
*** main.c 23 Feb 2002 18:24:11 -0000 1.25
--- main.c 25 Feb 2002 19:34:41 -0000 1.26
***************
*** 2,6 ****
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-02-23 15:09:18 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2002-02-25 15:23:44 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 57,60 ****
--- 57,93 ----
extern comp_cache_t * get_comp_cache_page(struct page *, unsigned short, comp_cache_fragment_t **, int, unsigned int);
+ extern void comp_cache_update_comp_stats(unsigned short, stats_page_t *);
+
+ static inline void
+ set_fragment_algorithm(comp_cache_fragment_t * fragment, unsigned short algorithm)
+ {
+ switch(algorithm) {
+ case WKDM_IDX:
+ CompFragmentSetWKdm(fragment);
+ break;
+ case WK4X4_IDX:
+ CompFragmentSetWK4x4(fragment);
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ static inline int
+ compress(void * from, void * to, unsigned short * algorithm)
+ {
+ stats_page_t comp_page_stats;
+
+ START_ZEN_TIME(comp_page_stats.myTimer);
+ comp_page_stats.comp_size = compression_algorithms[current_algorithm].comp(from, to, PAGE_SIZE/4, (void *)(&comp_data));
+ STOP_ZEN_TIME(comp_page_stats.myTimer, comp_page_stats.comp_cycles);
+
+ /* update some statistics */
+ comp_cache_update_comp_stats(current_algorithm, &comp_page_stats);
+
+ *algorithm = current_algorithm;
+
+ return ((comp_page_stats.comp_size <= PAGE_SIZE)?comp_page_stats.comp_size:PAGE_SIZE);
+ }
int
***************
*** 70,79 ****
if (!PageLocked(page))
BUG();
/* compress to a buffer */
current_compressed_page = (unsigned long) page;
-
aux_comp_size = compress(page_address(page), (unsigned long *) &buffer_compressed, &algorithm);
-
comp_page = get_comp_cache_page(page, aux_comp_size, &fragment, dirty, gfp_mask);
--- 103,118 ----
if (!PageLocked(page))
BUG();
+
+ if (PageCompCache(page)) {
+ if (!dirty)
+ BUG();
+ invalidate_comp_page(page->mapping, page->index, page);
+ if (PageDirty(page))
+ BUG();
+ }
/* compress to a buffer */
current_compressed_page = (unsigned long) page;
aux_comp_size = compress(page_address(page), (unsigned long *) &buffer_compressed, &algorithm);
comp_page = get_comp_cache_page(page, aux_comp_size, &fragment, dirty, gfp_mask);
***************
*** 85,94 ****
if (!comp_page) {
if (!dirty)
! return 0;
set_page_dirty(page);
! page->flags &= ~(1 >> PG_launder);
! UnlockPage(page);
! return 0;
}
--- 124,132 ----
if (!comp_page) {
if (!dirty)
! goto out_failed;
set_page_dirty(page);
! ClearPageLaunder(page);
! goto out_failed;
}
***************
*** 96,101 ****
* many page to be compressed twice */
! if (not_compressed(fragment))
! goto no_compress;
/* someone used the buffer while we slept to get a comp cache
--- 134,141 ----
* many page to be compressed twice */
! if (not_compressed(fragment)) {
! memcpy(page_address(comp_page->page), page_address(page), PAGE_SIZE);
! goto out;
! }
/* someone used the buffer while we slept to get a comp cache
***************
*** 116,137 ****
out:
! if (!comp_page->page)
! BUG();
!
UnlockPage(comp_page->page);
!
! PageSetCompCache(page);
UnlockPage(page);
return 0;
-
- no_compress:
- if (fragment->compressed_size != PAGE_SIZE)
- BUG();
-
- if (fragment->offset != 0)
- BUG();
-
- memcpy(page_address(comp_page->page), page_address(page), PAGE_SIZE);
- goto out;
}
--- 156,166 ----
out:
! if (PageTestandSetCompCache(page))
! BUG();
UnlockPage(comp_page->page);
!
! out_failed:
UnlockPage(page);
return 0;
}
Index: swapin.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v
retrieving revision 1.21
retrieving revision 1.22
diff -C2 -r1.21 -r1.22
*** swapin.c 23 Feb 2002 18:24:11 -0000 1.21
--- swapin.c 25 Feb 2002 19:34:41 -0000 1.22
***************
*** 2,6 ****
* linux/mm/comp_cache/swapin.c
*
! * Time-stamp: <2002-02-23 11:46:08 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/swapin.c
*
! * Time-stamp: <2002-02-25 15:25:38 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 19,50 ****
void
- decompress_fragment(comp_cache_fragment_t * fragment, struct page * uncompressed_page)
- {
- comp_cache_t * comp_page = fragment->comp_page;
-
- if (!comp_page || !comp_page->page)
- BUG();
- if (!PageLocked(comp_page->page))
- BUG();
- if (!PageLocked(uncompressed_page))
- BUG();
-
- if (not_compressed(fragment))
- goto only_copy;
-
- decompress(fragment_algorithm(fragment), page_address(comp_page->page) + fragment->offset, page_address(uncompressed_page));
-
- return;
-
- only_copy:
- memcpy(page_address(uncompressed_page), page_address(comp_page->page), PAGE_SIZE);
- }
-
- void
invalidate_comp_page(struct address_space *mapping, unsigned long offset, struct page * page) {
comp_cache_fragment_t * fragment;
int err = find_comp_page(mapping, offset, &fragment);
! if (page && PageCompCache(page))
PageClearCompCache(page);
--- 19,27 ----
void
invalidate_comp_page(struct address_space *mapping, unsigned long offset, struct page * page) {
comp_cache_fragment_t * fragment;
int err = find_comp_page(mapping, offset, &fragment);
! if (page)
PageClearCompCache(page);
***************
*** 58,67 ****
}
! /**
! */
! int
! lookup_comp_cache(struct address_space *mapping, unsigned long offset, struct page * page)
{
comp_cache_t * comp_page;
comp_cache_fragment_t * fragment;
swp_entry_t entry;
--- 35,105 ----
}
! extern void comp_cache_update_decomp_stats(unsigned short, stats_page_t *);
!
! static void
! decompress(comp_cache_fragment_t * fragment, struct page * page)
! {
! stats_page_t comp_page_stats;
! unsigned int algorithm = -1;
! void * from, * to;
!
! if (CompFragmentWKdm(fragment)) {
! algorithm = WKDM_IDX;
! goto actually_decomp;
! }
! if (CompFragmentWK4x4(fragment)) {
! algorithm = WK4X4_IDX;
! goto actually_decomp;
! }
!
! BUG();
!
! actually_decomp:
! from = page_address(fragment->comp_page->page) + fragment->offset;
! to = page_address(page);
!
! START_ZEN_TIME(comp_page_stats.myTimer);
! compression_algorithms[algorithm].decomp(from, to, PAGE_SIZE/4, (void *)(&comp_data));
! STOP_ZEN_TIME(comp_page_stats.myTimer, comp_page_stats.decomp_cycles);
!
! /* update some statistics */
! comp_cache_update_decomp_stats(algorithm, &comp_page_stats);
! }
!
! void
! decompress_fragment(comp_cache_fragment_t * fragment, struct page * page)
{
comp_cache_t * comp_page;
+
+ if (!fragment)
+ BUG();
+ comp_page = fragment->comp_page;
+ if (!comp_page->page)
+ BUG();
+ if (!PageLocked(page))
+ BUG();
+ if (!PageLocked(comp_page->page))
+ BUG();
+
+ if (compressed(fragment))
+ decompress(fragment, page);
+ else
+ memcpy(page_address(page), page_address(comp_page->page), PAGE_SIZE);
+
+ if (CompFragmentTestandClearDirty(fragment)) {
+ /* we want to set dirty only actually mapped pages,
+ * not temporary swap buffers, for example */
+ if (!list_empty(&page->list))
+ set_page_dirty(page);
+ list_del(&fragment->mapping_list);
+ list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages);
+ }
+ SetPageUptodate(page);
+ PageSetCompCache(page);
+ }
+
+ inline int
+ lookup_comp_cache(struct address_space *mapping, unsigned long offset, struct page * page)
+ {
comp_cache_fragment_t * fragment;
swp_entry_t entry;
***************
*** 69,73 ****
err = find_comp_page(mapping, offset, &fragment);
!
/* it will happen with vswap address only if the vswap address
* had a real address assigned */
--- 107,111 ----
err = find_comp_page(mapping, offset, &fragment);
!
/* it will happen with vswap address only if the vswap address
* had a real address assigned */
***************
*** 75,117 ****
goto check_vswap;
! comp_page = fragment->comp_page;
!
! /* sanity check */
! if (!comp_page->page)
! BUG();
! if (!PageLocked(page))
BUG();
! if (TryLockPage(comp_page->page))
BUG();
decompress_fragment(fragment, page);
!
! if (CompFragmentDirty(fragment))
! set_page_dirty(page);
! SetPageUptodate(page);
!
! comp_cache_update_faultin_stats();
/* NOTE: we have to make sure to free the fragment only when
* the page fault has been serviced since we may shrink the
* cache and move down the vswap entry */
! comp_cache_free_locked(fragment);
UnlockPage(page);
-
- return 0;
check_vswap:
if (mapping != &swapper_space)
goto out;
!
entry.val = offset;
if (!vswap_address(entry))
goto out;
!
if (vswap_address[SWP_OFFSET(entry)]->real_entry.val)
err = -EEXIST;
!
out:
return err;
--- 113,147 ----
goto check_vswap;
! if (TryLockPage(fragment->comp_page->page))
BUG();
! if (!page->pprev_hash)
BUG();
decompress_fragment(fragment, page);
!
! /* update fault in stats */
! compression_algorithms[current_algorithm].stats.pgccin++;
/* NOTE: we have to make sure to free the fragment only when
* the page fault has been serviced since we may shrink the
* cache and move down the vswap entry */
! comp_cache_free_locked(fragment);
UnlockPage(page);
+ return 0;
+
check_vswap:
if (mapping != &swapper_space)
goto out;
!
entry.val = offset;
if (!vswap_address(entry))
goto out;
!
if (vswap_address[SWP_OFFSET(entry)]->real_entry.val)
err = -EEXIST;
!
out:
return err;
***************
*** 120,133 ****
extern struct page * find_and_dirty_page(struct address_space *mapping, unsigned long offset, struct page **hash);
! void
! truncate_comp_pages(struct address_space * mapping, unsigned long start, unsigned partial)
{
struct list_head * fragment_lh, * tmp_lh;
comp_cache_fragment_t * fragment;
! if (list_empty(&mapping->comp_pages))
! return;
!
! list_for_each_safe(fragment_lh, tmp_lh, &mapping->comp_pages) {
fragment = list_entry(fragment_lh, comp_cache_fragment_t, mapping_list);
--- 150,160 ----
extern struct page * find_and_dirty_page(struct address_space *mapping, unsigned long offset, struct page **hash);
! static void
! truncate_list_comp_pages(struct list_head * list, unsigned long start, unsigned partial)
{
struct list_head * fragment_lh, * tmp_lh;
comp_cache_fragment_t * fragment;
! list_for_each_safe(fragment_lh, tmp_lh, list) {
fragment = list_entry(fragment_lh, comp_cache_fragment_t, mapping_list);
***************
*** 140,143 ****
--- 167,177 ----
void
+ truncate_comp_pages(struct address_space * mapping, unsigned long start, unsigned partial)
+ {
+ truncate_list_comp_pages(&mapping->clean_comp_pages, start, partial);
+ truncate_list_comp_pages(&mapping->dirty_comp_pages, start, partial);
+ }
+
+ void
lookup_all_comp_pages(struct address_space * mapping)
{
***************
*** 146,150 ****
comp_cache_fragment_t * fragment;
! if (list_empty(&mapping->comp_pages))
return;
--- 180,184 ----
comp_cache_fragment_t * fragment;
! if (list_empty(&mapping->dirty_comp_pages))
return;
***************
*** 155,284 ****
try_again:
! if (list_empty(&mapping->comp_pages)) {
page_cache_release(page);
return;
}
! fragment = list_entry(mapping->comp_pages.next, comp_cache_fragment_t, mapping_list);
hash = page_hash(mapping, fragment->index);
if (add_to_page_cache_unique(page, mapping, fragment->index, hash)) {
! if (CompFragmentDirty(fragment) && !find_and_dirty_page(mapping, fragment->index, hash))
BUG();
! comp_cache_free(fragment);
goto try_again;
}
-
- if (lookup_comp_cache(mapping, fragment->index, page))
- BUG();
-
- page_cache_release(page);
- goto alloc_page;
- }
-
- /* WARNING: this function is BROKEN. Have to FIX it ASAP. Rodrigo. */
- struct page *
- shmem_lookup_comp_cache(swp_entry_t * entry, struct address_space * mapping, unsigned long idx)
- {
- struct page * page, * aux_page = NULL, * new_page = NULL;
- comp_cache_t * comp_page = NULL;
- comp_cache_fragment_t * fragment = NULL;
- unsigned long offset;
-
- offset = SWP_OFFSET(*entry);
-
- /* it might have been compressed but not yet freed */
- page = __find_get_page(&swapper_space, entry->val, page_hash(&swapper_space, entry->val));
-
- if (page)
- goto found_swap_cache;
-
- goto try_comp_cache;
-
- try_page_cache:
- comp_page = NULL;
-
- page = __find_get_page(mapping, idx, page_hash(mapping, idx));
-
- if (page)
- goto found_swap_cache;
! /* fall through */
! try_comp_cache:
! comp_page = find_comp_page(&swapper_space, entry->val, &fragment);
!
! if (!comp_page) {
! if (new_page) {
! page_cache_release(new_page);
! wait_on_page(aux_page);
! }
!
! goto out;
! }
!
! if (!comp_page->page)
! BUG();
!
! if (TryLockPage(comp_page->page))
! BUG();
!
! if (!new_page) {
! aux_page = comp_page->page;
! UnlockPage(comp_page->page);
!
! new_page = alloc_page(GFP_HIGHUSER);
!
! goto try_page_cache;
! }
!
! if (TryLockPage(new_page))
BUG();
- page = new_page;
decompress_fragment(fragment, page);
-
- comp_cache_update_faultin_stats();
! if (add_to_swap_cache(page, *entry))
BUG();
- set_page_dirty(page);
! DEBUG_CHECK_COUNT;
! /* NOTE: we have to make sure to free the fragment only when
! * the page fault has been serviced since we may shrink the
! * cache and move down the vswap entry. If that happens,
! * recall that in do_swap_page we will notice that the pte has
! * changed and will back out the fault */
! comp_cache_free_locked(fragment);
!
UnlockPage(page);
! goto out;
!
! found_swap_cache:
! if (!comp_page)
! comp_page = find_comp_page(&swapper_space, entry->val, &fragment);
!
! if (comp_page) {
! comp_cache_free(fragment);
!
! /* if the page has been added to comp cache, it was
! * dirty and had its dirty bit cleared when it was
! * compressed (ie, written). Well, if it's still
! * present in swap cache and in comp cache too, it
! * means that it has not yet been swapped out. So I
! * gotta set the page dirty bit back to make it to be
! * compressed if needed. */
! set_page_dirty(page);
! }
!
! if (new_page)
! page_cache_release(new_page);
!
! out:
! if (comp_page && PageLocked(comp_page->page))
! BUG();
!
! return page;
}
--- 189,225 ----
try_again:
! if (list_empty(&mapping->dirty_comp_pages)) {
page_cache_release(page);
return;
}
! fragment = list_entry(mapping->dirty_comp_pages.next, comp_cache_fragment_t, mapping_list);
hash = page_hash(mapping, fragment->index);
+
+ if (!CompFragmentDirty(fragment))
+ BUG();
if (add_to_page_cache_unique(page, mapping, fragment->index, hash)) {
! if (!find_and_dirty_page(mapping, fragment->index, hash))
BUG();
! CompFragmentClearDirty(fragment);
! list_del(&fragment->mapping_list);
! list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages);
goto try_again;
}
! if (TryLockPage(fragment->comp_page->page))
BUG();
decompress_fragment(fragment, page);
! if (!PageDirty(page))
BUG();
! UnlockPage(fragment->comp_page->page);
! page_cache_release(page);
UnlockPage(page);
! goto alloc_page;
}
Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.27
retrieving revision 1.28
diff -C2 -r1.27 -r1.28
*** swapout.c 23 Feb 2002 18:24:11 -0000 1.27
--- swapout.c 25 Feb 2002 19:34:41 -0000 1.28
***************
*** 2,6 ****
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-02-23 14:52:16 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* /mm/comp_cache/swapout.c
*
! * Time-stamp: <2002-02-25 16:12:13 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 108,116 ****
list_add_tail(swp_buffer_lh, &swp_used_buffer_head);
- buffer_page->index = fragment->index;
- buffer_page->mapping = fragment->mapping;
-
swp_buffer->fragment = fragment;
return (swp_buffer);
}
--- 108,116 ----
list_add_tail(swp_buffer_lh, &swp_used_buffer_head);
swp_buffer->fragment = fragment;
+ buffer_page->index = fragment->index;
+ buffer_page->mapping = fragment->mapping;
+
return (swp_buffer);
}
***************
*** 118,122 ****
extern void decompress_fragment(comp_cache_fragment_t *, struct page *);
! static inline struct swp_buffer * decompress_to_swp_buffer(comp_cache_fragment_t * fragment) {
struct page * buffer_page;
struct swp_buffer * swp_buffer;
--- 118,123 ----
extern void decompress_fragment(comp_cache_fragment_t *, struct page *);
! static struct swp_buffer *
! decompress_to_swp_buffer(comp_cache_fragment_t * fragment) {
struct page * buffer_page;
struct swp_buffer * swp_buffer;
***************
*** 132,142 ****
if (!buffer_page)
BUG();
-
if (TryLockPage(fragment->comp_page->page))
BUG();
decompress_fragment(fragment, buffer_page);
UnlockPage(fragment->comp_page->page);
return swp_buffer;
}
--- 133,144 ----
if (!buffer_page)
BUG();
if (TryLockPage(fragment->comp_page->page))
BUG();
decompress_fragment(fragment, buffer_page);
+ buffer_page->flags &= (1 << PG_locked);
UnlockPage(fragment->comp_page->page);
+
return swp_buffer;
}
***************
*** 222,228 ****
extern void add_fragment_vswap(comp_cache_fragment_t *);
! /* get_comp_cache_page(page, compressed_size, fragment_out, gfp_mask)
! * - gets a Compressed Cache entry, freeing one if necessary. This is
! * done writing out the data the chosen (to be freed) entry stores.
*
* return value: pointer to the comp cache entry to be used. The comp
--- 224,231 ----
extern void add_fragment_vswap(comp_cache_fragment_t *);
! /***
! * get_comp_cache_page - gets a Compressed Cache entry, freeing one if
! * necessary. This is done writing out the data the chosen (to be
! * freed) entry stores.
*
* return value: pointer to the comp cache entry to be used. The comp
***************
*** 322,344 ****
BUG();
! check_references:
! /* it has been faulted in by some process(es), so we should
! * not compress this page. In vanilla kernel, we will handle
! * the case where page_count > 2 but there is no buffers to be
! * cleaned. In ac series, we will handle cases where there are
! * any references to this page but us and swap cache (and
! * buffers if we have pending buffers to be cleaned).
! *
! * ac:
! * if ((page_count(page) - !!page->buffers) > 2) {
! *
! * linus:
! * if (!page->buffers && page_count(page) > 2) {
! * */
! if (mapped(page)) {
! add_comp_page_to_hash_table(comp_page);
! UnlockPage(comp_page->page);
! return NULL;
! }
if (!comp_page->free_space)
--- 325,332 ----
BUG();
! check_references:
! /* 2 = us + page cache */
! if (page_count(page) - !!page->buffers != 2)
! goto out_failed;
if (!comp_page->free_space)
***************
*** 347,350 ****
--- 335,342 ----
/* allocate the new fragment */
fragment = alloc_fragment();
+
+ if (!fragment)
+ goto out_failed;
+
fragment->index = page->index;
fragment->mapping = page->mapping;
***************
*** 354,360 ****
fragment->comp_page = comp_page;
- if (dirty)
- CompFragmentSetDirty(fragment);
-
#if 0
{
--- 346,349 ----
***************
*** 372,377 ****
comp_page->free_offset += compressed_size;
- page->mapping->nrpages++;
-
last_page_size[last_page] = compressed_size;
last_page = (last_page++)%NUM_MEAN_PAGES;
--- 361,364 ----
***************
*** 407,414 ****
out:
! /* only real swap adressed fragments are added to lru queue */
add_fragment_to_hash_table(fragment);
add_fragment_to_lru_queue(fragment);
- list_add(&(fragment->mapping_list), &fragment->mapping->comp_pages);
if (comp_page->free_space < 0)
--- 394,407 ----
out:
! if (dirty) {
! CompFragmentSetDirty(fragment);
! list_add(&fragment->mapping_list, &fragment->mapping->dirty_comp_pages);
! }
! else
! list_add(&fragment->mapping_list, &fragment->mapping->clean_comp_pages);
! page->mapping->nrpages++;
!
add_fragment_to_hash_table(fragment);
add_fragment_to_lru_queue(fragment);
if (comp_page->free_space < 0)
***************
*** 423,426 ****
--- 416,424 ----
return comp_page;
+
+ out_failed:
+ add_comp_page_to_hash_table(comp_page);
+ UnlockPage(comp_page->page);
+ return NULL;
}
***************
*** 445,448 ****
--- 443,447 ----
list_add(&(swp_buffer->list), &swp_free_buffer_head);
+ INIT_LIST_HEAD(&buffer_page->list);
}
}
Index: vswap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v
retrieving revision 1.24
retrieving revision 1.25
diff -C2 -r1.24 -r1.25
*** vswap.c 23 Feb 2002 18:24:11 -0000 1.24
--- vswap.c 25 Feb 2002 19:34:41 -0000 1.25
***************
*** 2,6 ****
* linux/mm/comp_cache/vswap.c
*
! * Time-stamp: <2002-02-22 11:09:07 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/vswap.c
*
! * Time-stamp: <2002-02-25 13:49:32 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 27,30 ****
--- 27,32 ----
struct list_head vswap_address_used_head;
+ static struct pte_list * pte_list_freelist = NULL;
+
/* comp_cache_freeable_space = this is the estimated freeable space in
* the compressed cache, ie, the space in compressed cache minus the
***************
*** 49,57 ****
unsigned short last_page = 0;
- static kmem_cache_t * pte_cachep;
-
- #define alloc_pte_list() \
- ((struct pte_list *) kmem_cache_alloc(pte_cachep, SLAB_ATOMIC))
-
static inline int
comp_cache_mean_size(void) {
--- 51,54 ----
***************
*** 69,75 ****
}
! /* comp_cache_available_vswap(void) - this function checks to see if
! * we have any available vswap entry and also if we can assign any
! * vswap entry. */
int
comp_cache_available_vswap(void) {
--- 66,73 ----
}
! /***
! * comp_cache_available_vswap - this function returns 1 if we have any
! * available vswap entry and also if we can assign any vswap entry.
! */
int
comp_cache_available_vswap(void) {
***************
*** 100,103 ****
--- 98,105 ----
}
+ /***
+ * comp_cache_available_space - broader function which will check also
+ * if we have virtual swap entries to be compressed.
+ */
inline int
comp_cache_available_space(void) {
***************
*** 105,109 ****
return 1;
! /* we can still compress all these entries */
if (vswap_num_reserved_entries > 0)
return 1;
--- 107,111 ----
return 1;
! /* can we still compress all these entries? */
if (vswap_num_reserved_entries > 0)
return 1;
***************
*** 112,120 ****
}
! /* remap_vswap_ptes(entry, page): remap all ptes that are set to a
! * virtual swap entry which has already been assigned to this _entry_
! * real address, ie vswap->real_entry.val == entry.val, to the
! * page. This is much useful in swapoff process. It's called from
! * try_to_unuse():swapfile.c */
void
remap_vswap_ptes(swp_entry_t entry, struct page * page) {
--- 114,130 ----
}
! /***
! * remap_vswap_ptes - remap all pending ptes (ptes set to a virtual
! * swap entry which has already been given a real entry) to a page in
! * memory.
! * @entry: virtual swap entry that will have all its ptes remapped
! * @page: page to which all the ptes are going to be set.
! *
! * This function will map all the ptes still set to this virtual swap
! * entry because we couldn't set them when assigning a real entry to
! * this vswap. It may be called from try_to_unuse():swapfile.c in
! * swapoff process
! *
! */
void
remap_vswap_ptes(swp_entry_t entry, struct page * page) {
***************
*** 156,160 ****
++vma->vm_mm->rss;
! comp_cache_swp_free_generic(old_entry, 0);
next:
--- 166,170 ----
++vma->vm_mm->rss;
! comp_cache_swp_free(old_entry);
next:
***************
*** 164,169 ****
}
swp_entry_t
! get_virtual_swap_page(struct page * page, unsigned short count)
{
swp_entry_t entry;
--- 174,190 ----
}
+ /**
+ * get_virtual_swap_page - returns a vswap entry, if available, and
+ * initializes it.
+ *
+ * This function checks if we have available virtual swap space. If we
+ * do, it removes a virtual swap entry from the free list, zeroing the
+ * data struct, but the count, which will be set to 1. Updates the
+ * control variables: number of used entries and number of reserved
+ * entries.
+ *
+ */
swp_entry_t
! get_virtual_swap_page(void)
{
swp_entry_t entry;
***************
*** 192,196 ****
BUG();
! vswap_address[offset]->count = count;
vswap_address[offset]->pte_list = NULL;
vswap_address[offset]->swap_cache_page = NULL;
--- 213,217 ----
BUG();
! vswap_address[offset]->count = 1;
vswap_address[offset]->pte_list = NULL;
vswap_address[offset]->swap_cache_page = NULL;
***************
*** 209,213 ****
}
!
void
comp_cache_swp_duplicate(swp_entry_t entry)
--- 230,239 ----
}
! /**
! * comp_cache_swp_duplicate - swap_duplicate for virtual swap
! * addresses.
! * @entry: the virtual swap entry which will have its count
! * incremented
! */
void
comp_cache_swp_duplicate(swp_entry_t entry)
***************
*** 229,234 ****
}
int
! comp_cache_swp_free_generic(swp_entry_t entry, int free_fragment)
{
unsigned long offset = SWP_OFFSET(entry);
--- 255,273 ----
}
+ /**
+ * comp_cache_swp_free - swap_free for virtual swap addresses.
+ * @entry: the virtual swap entry which will have its count
+ * decremented and possibly the vswap entry freed.
+ *
+ * This function will decrement the vswap entry counter. If we have
+ * had a real swap address assigned, we will call swap_free() for it,
+ * since we hold a reference to the real address for every pending
+ * pte. If we get to count == 0, the entry will have its struct
+ * initalized and be added to the free list. In the case we have a
+ * fragment (recall that fragments don't hold references on swap
+ * addresses), we will free it too.
+ */
int
! comp_cache_swp_free(swp_entry_t entry)
{
unsigned long offset = SWP_OFFSET(entry);
***************
*** 285,295 ****
BUG();
- /* we won't free the fragment when assigning a real address to
- * a virtual addressed fragment in comp_cache_release() */
- if (!free_fragment) {
- list_del_init(&vswap->list);
- goto add_to_free_list;
- }
-
/* NOTE: that should be checked out after we updated the
* vswap, since it may shrink the cache */
--- 324,327 ----
***************
*** 313,316 ****
--- 345,354 ----
}
+ /**
+ * comp_cache_swp_count - swap_count for virtual swap addresses.
+ * @entry: virtual swap entry that will be returned its counter.
+ *
+ * This function returns the counter for the vswap entry parameter.
+ */
int
comp_cache_swp_count(swp_entry_t entry)
***************
*** 327,336 ****
}
! /* remove_fragment_vswap(fragment) - this function tells the vswap
! * entry that it does not have a fragment any longer.
*
! * That means that we are going to set the fragment variable to
! * VSWAP_RESERVED, remove the vswap entry from the used list and
! * update control variables */
inline void
remove_fragment_vswap(comp_cache_fragment_t * fragment)
--- 365,383 ----
}
! /***
! * remove_fragment_vswap - this function tells the vswap entry that it
! * doesn't have a compressed fragment any longer.
! * @fragment: fragment which will removed from the vswap entry struct.
! *
! * Based on the fragment->index, this function sets the fragment field
! * of vswap struct to the VSWAP_RESERVED, removes the vswap entry from
! * the used list and update some control variables: the number of
! * reserved entries (entries w/o fragment) and also the freeable
! * space. Once the fragment is removed, this vswap entry will be taken
! * as reserved, since it is used but doesn't have a compressed
! * fragment. The number of reserved entries will be used to know if we
! * can or not give away new vswap entries.
*
! */
inline void
remove_fragment_vswap(comp_cache_fragment_t * fragment)
***************
*** 362,371 ****
}
! /* add_fragment_vswap(fragment) - this function tells the vswap entry
! * that it has a compressed fragment.
*
! * That means that we are going to set the fragment variable to the
! * fragment address, add the vswap entry to the used list and update
! * control variables */
inline void
add_fragment_vswap(comp_cache_fragment_t * fragment)
--- 409,424 ----
}
! /***
! * add_fragment_vswap - this function tells the vswap entry that it
! * has a compressed fragment.
! * @fragment: fragment which will added to the vswap entry struct.
! *
! * Based on the fragment->index, this function sets the fragment field
! * of vswap struct to the fragment address, adds the vswap entry to
! * the used list and update some control variables: the number of
! * reserved entries (entries w/o fragment) and also the freeable space
! * (since this fragment does not have backing storage).
*
! */
inline void
add_fragment_vswap(comp_cache_fragment_t * fragment)
***************
*** 397,417 ****
}
! inline void
! pte_list_free(struct pte_list * pte_list, struct pte_list * prev_pte_list, unsigned long offset) {
! if (!prev_pte_list) {
vswap_address[offset]->pte_list = pte_list->next;
! goto out;
! }
! prev_pte_list->next = pte_list->next;
! out:
! kmem_cache_free(pte_cachep, (pte_list));
}
! /* this function is mostly based on page_remove_pmap from reverse
! * mapping patch by Rik van Riel */
! inline void
! remove_pte_vswap(pte_t * ptep)
{
struct pte_list * pte_list, * prev_pte_list = NULL;
--- 450,542 ----
}
! /**
! * pte_list_free - free pte_list structure
! * @pte_list: pte_list struct to free
! * @prev_pte_list: previous pte_list on the list (may be NULL)
! * @page: page this pte_list hangs off (may be NULL)
! *
! * This function unlinks pte_list from the singly linked list it
! * may be on and adds the pte_list to the free list. May also be
! * called for new pte_list structures which aren't on any list yet.
! * Caller needs to hold the pagemap_lru_list.
! *
! * (adapted from Rik van Riel's rmap patch)
! */
! static inline void
! pte_list_free(struct pte_list * pte_list, struct pte_list * prev_pte_list, unsigned long offset)
! {
! if (prev_pte_list)
! prev_pte_list->next = pte_list->next;
! else
vswap_address[offset]->pte_list = pte_list->next;
!
! pte_list->ptep = NULL;
! pte_list->next = pte_list_freelist;
! pte_list_freelist = pte_list;
! }
! /**
! * alloc_new_pte_lists - convert a free page to pte_list structures
! *
! * Grabs a free page and converts it to pte_list structures. We really
! * should pre-allocate these earlier in the pagefault path or come up
! * with some other trick.
! *
! * Note that we cannot use the slab cache because the pte_list structure
! * is way smaller than the minimum size of a slab cache allocation.
! */
! static void alloc_new_pte_lists(void)
! {
! struct pte_list * pte_list = (void *) get_zeroed_page(GFP_ATOMIC);
! int i = PAGE_SIZE / sizeof(struct pte_list);
! if (pte_list) {
! for (; i-- > 0; pte_list++) {
! pte_list->ptep = NULL;
! pte_list->next = pte_list_freelist;
! pte_list_freelist = pte_list;
! }
! } else
! panic("Fix pte_list allocation, you lazy bastard!\n");
}
! /**
! * pte_list_alloc - allocate a pte_list struct
! *
! * Returns a pointer to a fresh pte_list structure. Allocates new
! * pte_list structures as required.
! * Caller needs to hold the pagemap_lru_lock.
! *
! * (adapted from Rik van Riel's rmap patch)
! */
! static inline struct pte_list * pte_list_alloc(void)
! {
! struct pte_list * pte_list;
!
! /* Allocate new pte_list structs as needed. */
! if (!pte_list_freelist)
! alloc_new_pte_lists();
!
! /* Grab the first pte_list from the freelist. */
! pte_list = pte_list_freelist;
! pte_list_freelist = pte_list->next;
! pte_list->next = NULL;
!
! return pte_list;
! }
!
!
! /**
! * remove_pte_vswap - remove a pte from a vswap entry
! * @ptep: pointer to the pte to be removed
! *
! * This function searchs for the pte in the pte_list from its vswap
! * address entry, freeing it. Returns for null, present and real swap
! * adressed ptes.
! *
! * (adapted from Rik van Riel's rmap patch)
! */
! void FASTCALL(remove_pte_vswap(pte_t *));
! void remove_pte_vswap(pte_t * ptep)
{
struct pte_list * pte_list, * prev_pte_list = NULL;
***************
*** 449,454 ****
}
! inline void
! add_swap_cache_page_vswap(struct page * page, swp_entry_t entry)
{
unsigned long offset;
--- 574,620 ----
}
! /**
! * add_pte_vswap - add a pte to a vswap entry pte list
! * @ptep: pointer to the pte to be added
! * @entry: vswap entry to whose list will be added the ptep
! *
! * This function allocates a new pte_list struct, adding to the
! * corresponding virtual swap entry struct. Returns null for real swap
! * entries.
! *
! * (adapted from Rik van Riel's rmap patch)
! */
! void FASTCALL(add_pte_vswap(pte_t *, swp_entry_t));
! void add_pte_vswap(pte_t * ptep, swp_entry_t entry) {
! unsigned long offset;
! struct pte_list * pte_list;
!
! if (!vswap_address(entry))
! return;
!
! offset = SWP_OFFSET(entry);
!
! pte_list = pte_list_alloc();
! pte_list->next = vswap_address[offset]->pte_list;
! pte_list->ptep = ptep;
!
! vswap_address[offset]->pte_list = pte_list;
! }
!
!
!
! /**
! * add_swap_cache_page_vswap - adds the swap cache page to the
! * corresponding vswap entry.
! * @page: page to be added to the struct
! * @entry: vswap address
! *
! * This function adds the virtual swap address entry struct to the
! * page passed as parameter. It's useful to avoid looking for the page
! * whenever some change to the vswap adress is needed.
! *
! */
! void FASTCALL(add_swap_cache_page_vswap(struct page *, swp_entry_t));
! void add_swap_cache_page_vswap(struct page * page, swp_entry_t entry)
{
unsigned long offset;
***************
*** 465,470 ****
}
! inline void
! del_swap_cache_page_vswap(struct page * page)
{
unsigned long offset;
--- 631,648 ----
}
!
! /**
! * del_swap_cache_page_vswap - deletes the swap cache page from the
! * corresponding vswap entry.
! * @page: page to be deleted to the struct
! *
! * This function removes the page from the corresponding virtual swap
! * address entry (we can get the entry from page->index). It will be
! * called when removing a page from swap cache or before assigning a
! * real entry.
! *
! */
! void FASTCALL(del_swap_cache_page_vswap(struct page *));
! void del_swap_cache_page_vswap(struct page * page)
{
unsigned long offset;
***************
*** 484,504 ****
}
! inline void
! add_pte_vswap(pte_t * ptep, swp_entry_t entry) {
! unsigned long offset;
! struct pte_list * tmp_pte_list;
!
! if (!vswap_address(entry))
! return;
!
! offset = SWP_OFFSET(entry);
!
! tmp_pte_list = alloc_pte_list();
! tmp_pte_list->next = vswap_address[offset]->pte_list;
! tmp_pte_list->ptep = ptep;
!
! vswap_address[offset]->pte_list = tmp_pte_list;
! }
!
void
vswap_alloc_and_init(struct vswap_address ** vswap_address, int offset) {
--- 662,676 ----
}
! /**
! * vswap_alloc_and_init - allocates a new virtual swap entry and inits
! * it.
! * @vswap_address: main virtual swap array of pointers to
! * vswap_address structs.
! * @offset: offset within the vswap_address.
! *
! * This function allocates a new virtual swap entry and initializes
! * its struct, adding it to the list of free vswap entries.
! *
! */
void
vswap_alloc_and_init(struct vswap_address ** vswap_address, int offset) {
***************
*** 543,548 ****
last_page_size = (unsigned short *) vmalloc(NUM_MEAN_PAGES * sizeof(unsigned short));
-
- pte_cachep = kmem_cache_create("pte_cache", sizeof(struct pte_list), 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
for (i = 0; i < NUM_MEAN_PAGES; i++)
--- 715,718 ----