linuxcompressed-checkins Mailing List for Linux Compressed Cache (Page 15)
Status: Beta
Brought to you by:
nitin_sf
You can subscribe to this list here.
| 2001 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
|
Oct
(2) |
Nov
|
Dec
(31) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2002 |
Jan
(28) |
Feb
(50) |
Mar
(29) |
Apr
(6) |
May
(33) |
Jun
(36) |
Jul
(60) |
Aug
(7) |
Sep
(12) |
Oct
|
Nov
(13) |
Dec
(3) |
| 2003 |
Jan
|
Feb
|
Mar
|
Apr
|
May
(9) |
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
| 2006 |
Jan
(13) |
Feb
(4) |
Mar
(4) |
Apr
(1) |
May
|
Jun
(22) |
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-13 19:13:01
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv23494/mm/comp_cache
Modified Files:
Makefile main.c
Added Files:
compswap.c proc.c
Log Message:
First batch of changes to remove #ifdefs and make code cleaner.
It follows Documentation/SubmittingPatches document.
--- NEW FILE ---
/*
* linux/mm/comp_cache/compswap.c
*
* Time-stamp: <2001-12-13 13:29:36 rcastro>
*
* Linux Virtual Memory Compressed Cache
*
* Author: Rodrigo S. de Castro <rc...@im...>
* Licensed under the GPL - 2001
*/
#include <linux/comp_cache.h>
/*
 * Record in the swap map's side-table whether the page stored at
 * @entry is compressed on swap, and with which algorithm.
 *
 * @entry:      real (non-virtual) swap entry to tag
 * @compressed: non-zero if the on-swap data is compressed
 * @algorithm:  WKDM_IDX or WK4X4_IDX; ignored when !compressed
 *
 * Invalid entries are reported via printk and ignored (no return
 * value), mirroring the validation style of swap_free() in swapfile.c.
 * Virtual swap entries have no backing swap file, so tagging one is a
 * bug.
 */
void
set_comp_swp_entry(swp_entry_t entry, int compressed, int algorithm)
{
	struct swap_info_struct * p;
	unsigned long offset, type;

	if (vswap_address(entry))
		BUG();
	if (!entry.val)
		goto bad_entry;
	type = SWP_TYPE(entry);
	if (type >= nr_swapfiles)
		goto bad_file;
	p = type + swap_info;
	offset = SWP_OFFSET(entry);
	if (offset >= p->max)
		goto bad_offset;
	if (!p->swap_map[offset])
		goto bad_unused;
	if (!compressed) {
		/* not compressed: drop the flag and any stale algorithm bits */
		ClearEntryCompressed(p, offset);
		ClearEntryWKdm(p, offset);
		ClearEntryWK4x4(p, offset);
		return;
	}
	SetEntryCompressed(p, offset);
	/* we cannot clear the algorithm relative to the entry until
	 * we swap it out again, because the swap cache page may be
	 * freed and thus it will be reread (and decompressed
	 * again). Therefore that's why we clean the flag here */
	ClearEntryWKdm(p, offset);
	ClearEntryWK4x4(p, offset);
	switch (algorithm) {
	case WKDM_IDX:
		SetEntryWKdm(p, offset);
		break;
	case WK4X4_IDX:
		SetEntryWK4x4(p, offset);
		break;
	default:
		BUG();
	}
out:
	return;
bad_entry:
	/* was "swap_compressed" — copy-paste; name this function instead */
	printk("Null entry in set_comp_swp_entry\n");
	goto out;
bad_file:
	printk("Bad swap file entry (set_comp_swp_entry) %08lx\n", entry.val);
	goto out;
bad_offset:
	printk("Bad swap offset entry %08lx\n", entry.val);
	goto out;
bad_unused:
	printk("Unused swap offset entry in set_comp_swp_entry %08lx\n", entry.val);
	goto out;
}
/*
 * Return the compression algorithm index (WKDM_IDX or WK4X4_IDX)
 * recorded for the compressed page stored at @entry.
 *
 * @entry: real (non-virtual) swap entry; must be marked compressed.
 *
 * On any validation failure a message is printed and -1 is returned.
 * NOTE(review): the return type is unsigned short, so that -1 reaches
 * the caller truncated to 0xffff — callers must treat any value other
 * than a valid *_IDX as an error; confirm before changing the type.
 * BUG()s if the entry is compressed but carries no algorithm bit.
 */
unsigned short
swap_algorithm(swp_entry_t entry)
{
	struct swap_info_struct * p;
	unsigned long offset, type;
	int retval = -1;

	/* virtual swap entries have no backing swap file */
	if (vswap_address(entry))
		BUG();
	if (!entry.val)
		goto bad_entry;
	type = SWP_TYPE(entry);
	if (type >= nr_swapfiles)
		goto bad_file;
	p = type + swap_info;
	offset = SWP_OFFSET(entry);
	if (offset >= p->max)
		goto bad_offset;
	if (!p->swap_map[offset])
		goto bad_unused;
	if (!EntryCompressed(p, offset))
		goto bad_compressed;
	if (EntryWKdm(p, offset))
		retval = WKDM_IDX;
	if (EntryWK4x4(p, offset))
		retval = WK4X4_IDX;
	/* compressed entry with no algorithm recorded: inconsistent state */
	if (retval == -1)
		BUG();
out:
	return retval;
bad_entry:
	/* was "swap_compressed" — copy-paste; name this function instead */
	printk("Null entry in swap_algorithm\n");
	goto out;
bad_file:
	printk("Bad swap file entry (swap_algorithm) %08lx\n", entry.val);
	goto out;
bad_offset:
	printk("Bad swap offset entry %08lx\n", entry.val);
	goto out;
bad_unused:
	printk("Unused swap offset entry in swap_algorithm %08lx\n", entry.val);
	goto out;
bad_compressed:
	printk("Swap offset entry not compressed %08lx\n", entry.val);
	goto out;
}
/*
 * Test whether the page stored at @entry is compressed on swap.
 *
 * @entry: real (non-virtual) swap entry.
 *
 * Returns non-zero when the compressed bit is set for the entry,
 * zero otherwise (including on every validation failure, each of
 * which is reported via printk). A virtual swap entry is a bug.
 */
int
swap_compressed(swp_entry_t entry)
{
	struct swap_info_struct * sis;
	unsigned long off, file_type;
	int is_compressed = 0;

	if (vswap_address(entry))
		BUG();

	if (!entry.val)
		goto bad_entry;

	file_type = SWP_TYPE(entry);
	if (file_type >= nr_swapfiles)
		goto bad_file;

	sis = swap_info + file_type;
	off = SWP_OFFSET(entry);
	if (off >= sis->max)
		goto bad_offset;
	if (!sis->swap_map[off])
		goto bad_unused;

	is_compressed = EntryCompressed(sis, off);
out:
	return is_compressed;

bad_entry:
	printk("Null entry in swap_compressed\n");
	goto out;
bad_file:
	printk("Bad swap file entry (swap_compressed) %08lx\n", entry.val);
	goto out;
bad_offset:
	printk("Bad swap offset entry %08lx\n", entry.val);
	goto out;
bad_unused:
	printk("Unused swap offset entry in swap_compressed %08lx\n", entry.val);
	goto out;
}
Index: Makefile
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/Makefile,v
retrieving revision 1.2
retrieving revision 1.3
diff -C2 -r1.2 -r1.3
*** Makefile 2001/10/01 22:43:59 1.2
--- Makefile 2001/12/13 19:12:58 1.3
***************
*** 5,9 ****
O_TARGET := comp_cache.o
! obj-y := main.o vswap.o free.o swapout.o swapin.o avl.o aux.o WK4x4.o WKdm.o
include $(TOPDIR)/Rules.make
--- 5,13 ----
O_TARGET := comp_cache.o
! obj-y := main.o vswap.o free.o swapout.o swapin.o avl.o aux.o proc.o WK4x4.o WKdm.o
!
! ifeq ($(CONFIG_COMP_SWAP),y)
! obj-y += compswap.o
! endif
include $(TOPDIR)/Rules.make
Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -C2 -r1.7 -r1.8
*** main.c 2001/12/12 20:45:46 1.7
--- main.c 2001/12/13 19:12:58 1.8
***************
*** 2,6 ****
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2001-12-10 16:59:47 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2001-12-13 10:43:48 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 42,51 ****
kmem_cache_t * fragment_cachep;
- int tmp_comp_cache_size = 20, tmp_comp_cache_size_min = 0, tmp_comp_cache_size_max = 50;
-
/* compression algorithms */
compression_algorithm_t compression_algorithms[NUM_ALGORITHMS];
int current_algorithm;
- int algorithm_min = WKDM_IDX<WK4X4_IDX?WKDM_IDX:WK4X4_IDX, algorithm_max = WKDM_IDX>WK4X4_IDX?WKDM_IDX:WK4X4_IDX;
static char buffer_compressed[MAX_COMPRESSED_SIZE];
--- 42,48 ----
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-13 19:13:00
|
Update of /cvsroot/linuxcompressed/linux/kernel
In directory usw-pr-cvs1:/tmp/cvs-serv23494/kernel
Modified Files:
sysctl.c
Log Message:
First batch of changes to remove #ifdefs and make code cleaner.
It follows Documentation/SubmittingPatches document.
Index: sysctl.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/kernel/sysctl.c,v
retrieving revision 1.2
retrieving revision 1.3
diff -C2 -r1.2 -r1.3
*** sysctl.c 2001/12/12 20:45:46 1.2
--- sysctl.c 2001/12/13 19:12:58 1.3
***************
*** 50,57 ****
extern int core_uses_pid;
extern int cad_pid;
- #ifdef CONFIG_COMP_CACHE
- extern int tmp_comp_cache_size, tmp_comp_cache_size_min, tmp_comp_cache_size_max;
- extern int current_algorithm, algorithm_min, algorithm_max;
- #endif
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
--- 50,53 ----
***************
*** 114,118 ****
static ctl_table vm_table[];
#ifdef CONFIG_COMP_CACHE
! static ctl_table comp_cache_table[];
#endif
#ifdef CONFIG_NET
--- 110,114 ----
static ctl_table vm_table[];
#ifdef CONFIG_COMP_CACHE
! extern ctl_table comp_cache_table[];
#endif
#ifdef CONFIG_NET
***************
*** 287,302 ****
{0}
};
-
- #ifdef CONFIG_COMP_CACHE
- static ctl_table comp_cache_table[] = {
- {CC_SIZE, "size", &tmp_comp_cache_size, sizeof(int), 0644, NULL,
- &proc_dointvec_minmax, &sysctl_intvec, NULL, &tmp_comp_cache_size_min,
- &tmp_comp_cache_size_max},
- {CC_ALGORITHM, "algorithm", &current_algorithm, sizeof(int), 0644, NULL,
- &proc_dointvec_minmax, &sysctl_intvec, NULL, &algorithm_min,
- &algorithm_max},
- {0}
- };
- #endif
static ctl_table proc_table[] = {
--- 283,286 ----
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-13 19:13:00
|
Update of /cvsroot/linuxcompressed/linux/fs/proc
In directory usw-pr-cvs1:/tmp/cvs-serv23494/fs/proc
Modified Files:
proc_misc.c
Log Message:
First batch of changes to remove #ifdefs and make code cleaner.
It follows Documentation/SubmittingPatches document.
Index: proc_misc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/fs/proc/proc_misc.c,v
retrieving revision 1.2
retrieving revision 1.3
diff -C2 -r1.2 -r1.3
*** proc_misc.c 2001/12/12 20:45:46 1.2
--- proc_misc.c 2001/12/13 19:12:57 1.3
***************
*** 37,43 ****
#include <linux/smp_lock.h>
#include <linux/seq_file.h>
- #ifdef CONFIG_COMP_CACHE
#include <linux/comp_cache.h>
- #endif
#include <asm/uaccess.h>
--- 37,41 ----
***************
*** 316,396 ****
return proc_calc_metrics(page, start, off, count, eof, len);
}
-
- #ifdef CONFIG_COMP_CACHE
- void
- print_comp_cache_stats(unsigned short alg_idx, char * page, int * length)
- {
- unsigned int compression_ratio = 0, discard_ratio = 0;
- unsigned long long mean_size = 0, mean_comp_cycles = 0, mean_decomp_cycles = 0;
- compression_algorithm_t * algorithm = &compression_algorithms[alg_idx];
- stats_summary_t * stats = &algorithm->stats;
-
- if (!stats->pgccout) {
- *length += sprintf(page + *length, "compressed cache statistics\n");
- *length += sprintf(page + *length, "no pages have been compressed with %s%s\n\n", algorithm->name, current_msg);
- return;
- }
-
- mean_size = big_division(stats->comp_size_sum, stats->pgccout);
- mean_comp_cycles = big_division(stats->comp_cycles_sum, stats->pgccout);
- mean_decomp_cycles = big_division(stats->decomp_cycles_sum, stats->pgccout);
- compression_ratio = ((big_division(stats->comp_size_sum, stats->pgccout)*100)/PAGE_SIZE);
- discard_ratio = (int) ((stats->discarded_pages * 100)/stats->pgccout);
-
- *length += sprintf(page + *length, "compressed cache - statistics\n");
- *length += sprintf(page + *length, "-- algorithm --\n%s%s\n", algorithm->name, current_msg);
- *length += sprintf(page + *length, "-- compressed pages --\n%d\n", stats->pgccout);
- *length += sprintf(page + *length, "-- compressed pages faulted in --\n%d\n", stats->pgccin);
-
- *length += sprintf(page + *length, "-- size --\n");
- *length += sprintf(page + *length, "min | max | mean\n");
- *length += sprintf(page + *length,
- " %8d | "
- " %8d | "
- " %9Lu\n",
- (stats->comp_size_min == INF?0:stats->comp_size_min),
- stats->comp_size_max,
- mean_size);
-
- *length += sprintf(page + *length, "-- compression cycles --\n");
- *length += sprintf(page + *length, "min | max | mean\n");
- *length += sprintf(page + *length,
- "%10lu | "
- "%10lu | "
- "%11Lu\n",
- (stats->comp_cycles_min == INF?0:stats->comp_cycles_min),
- stats->comp_cycles_max,
- mean_comp_cycles);
-
- *length += sprintf(page + *length, "-- decompression cycles --\n");
- *length += sprintf(page + *length, "min | max | mean\n");
- *length += sprintf(page + *length,
- "%10lu | "
- "%10lu | "
- "%11Lu\n\n",
- (stats->decomp_cycles_min == INF?0:stats->decomp_cycles_min),
- stats->decomp_cycles_max,
- mean_decomp_cycles);
-
- *length += sprintf(page + *length,
- "compression ratio: %8d%%\n"
- "discarded pages: %8d\n"
- "discarded ratio: %8d%%\n\n",
- compression_ratio,
- stats->discarded_pages,
- discard_ratio);
- }
-
- static int
- comp_cache_stat_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
- {
- int length = 0, i;
-
- for (i = 0; i < NUM_ALGORITHMS; i++)
- print_comp_cache_stats(i, page, &length);
-
- return proc_calc_metrics(page, start, off, count, eof, length);
- }
- #endif
static int devices_read_proc(char *page, char **start, off_t off,
--- 314,317 ----
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-13 19:13:00
|
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv23494/include/linux
Modified Files:
comp_cache.h swap.h sysctl.h
Log Message:
First batch of changes to remove #ifdefs and make code cleaner.
It follows Documentation/SubmittingPatches document.
Index: comp_cache.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v
retrieving revision 1.30
retrieving revision 1.31
diff -C2 -r1.30 -r1.31
*** comp_cache.h 2001/12/12 20:45:46 1.30
--- comp_cache.h 2001/12/13 19:12:57 1.31
***************
*** 2,6 ****
* linux/mm/comp_cache.h
*
! * Time-stamp: <2001-12-11 12:56:38 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache.h
*
! * Time-stamp: <2001-12-13 15:02:58 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 19,22 ****
--- 19,23 ----
#include <linux/fs.h>
#include <linux/swapctl.h>
+ #include <linux/pagemap.h>
#include <asm/atomic.h>
***************
*** 321,330 ****
--- 322,361 ----
/* swapin.c */
+ #ifdef CONFIG_COMP_CACHE
+
struct page * lookup_comp_cache(swp_entry_t);
struct page * shmem_lookup_comp_cache(swp_entry_t *, struct address_space *, unsigned long);
+ #else
+
+ static inline struct page * lookup_comp_cache(swp_entry_t entry)
+ {
+ return lookup_swap_cache(entry);
+ }
+
+ static inline struct page * shmem_lookup_comp_cache(swp_entry_t * entry, struct address_space * mapping, unsigned long idx)
+ {
+ return find_get_page(&swapper_space, entry->val);
+ }
+
+ #endif /* CONFIG_COMP_CACHE */
+
/* main.c */
+ #ifdef CONFIG_COMP_CACHE
+
int compress_page(struct page *);
+ void comp_cache_init(void);
+
+ #else
+
+ extern int swap_writepage(struct page*);
+
+ static inline int compress_page(struct page * page) {
+ return swap_writepage(page);
+ }
+ static inline void comp_cache_init(void) {};
+ #endif /* CONFIG_COMP_CACHE */
+
/* vswap.c */
struct vswap_address {
***************
*** 344,349 ****
--- 375,386 ----
};
+ extern struct list_head vswap_address_free_head, vswap_address_used_head;
+ extern struct vswap_address ** vswap_address;
+ extern unsigned long long estimated_free_space;
#define COMP_CACHE_SWP_TYPE MAX_SWAPFILES
+
+ #ifdef CONFIG_COMP_CACHE
+
#define VSWAP_RESERVED ((comp_cache_fragment_t *) 0xffffffff)
***************
*** 357,364 ****
#define comp_cache_swp_free(swp_entry) comp_cache_swp_free_generic(swp_entry, 1)
- extern struct list_head vswap_address_free_head, vswap_address_used_head;
- extern struct vswap_address ** vswap_address;
- extern unsigned long long estimated_free_space;
-
void comp_cache_swp_duplicate(swp_entry_t);
int comp_cache_swp_free_generic(swp_entry_t, int);
--- 394,397 ----
***************
*** 377,384 ****
inline void unlock_vswap(swp_entry_t);
/* free.c */
- int comp_cache_release(swp_entry_t);
inline void comp_cache_free(comp_cache_fragment_t *);
/* aux.c */
unsigned long long big_division(unsigned long long, unsigned long long);
--- 410,458 ----
inline void unlock_vswap(swp_entry_t);
+ #else
+
+ #define vswap_address_available() (0)
+ #define vswap_info_struct(p) (0)
+ #define vswap_address(entry) (0)
+
+ #define comp_cache_swp_free(swp_entry) comp_cache_swp_free_generic(swp_entry, 1)
+
+ static inline void comp_cache_swp_duplicate(swp_entry_t entry) {};
+ static inline int comp_cache_swp_free_generic(swp_entry_t entry, int free_fragment)
+ {
+ return 0;
+ }
+ static inline int comp_cache_swp_count(swp_entry_t entry)
+ {
+ return 0;
+ }
+
+ static inline int comp_cache_available_space(void)
+ {
+ return 0;
+ }
+ static inline swp_entry_t get_virtual_swap_page(struct page * page, unsigned short count)
+ {
+ return ((swp_entry_t) { 0 });
+ }
+ static inline void add_pte_vswap(pte_t * ptep, swp_entry_t entry) {};
+ static inline void remove_pte_vswap(pte_t * ptep) {};
+ static inline void add_swap_cache_page_vswap(struct page * page, swp_entry_t entry) {};
+ static inline void del_swap_cache_page_vswap(struct page * page) {};
+
+ static inline void lock_vswap(swp_entry_t entry) {};
+ static inline void unlock_vswap(swp_entry_t entry) {};
+
+ #endif
+
/* free.c */
inline void comp_cache_free(comp_cache_fragment_t *);
+ #ifdef CONFIG_COMP_CACHE
+ int comp_cache_release(swp_entry_t);
+ #else
+ static inline int comp_cache_release(swp_entry_t entry) { return 0; }
+ #endif
+
/* aux.c */
unsigned long long big_division(unsigned long long, unsigned long long);
***************
*** 394,397 ****
--- 468,501 ----
inline void remove_fragment_from_hash_table(comp_cache_fragment_t *);
+ /* proc.c */
+ void print_comp_cache_stats(unsigned short, char *, int *);
+ int comp_cache_stat_read_proc(char *, char **, off_t, int, int *, void *);
+
+ /* compswap.c */
+ #ifdef CONFIG_COMP_SWAP
+
+ void set_comp_swp_entry(swp_entry_t, int, int);
+ int swap_compressed(swp_entry_t);
+ unsigned short swap_algorithm(swp_entry_t);
+
+ #define CS_WKdm 0
+ #define CS_WK4x4 1
+
+ #define CS_compressed 15
+
+ #define SetEntryWK4x4(p, offset) set_bit(CS_WK4x4, &(p)->swap_comp[offset])
+ #define ClearEntryWK4x4(p, offset) clear_bit(CS_WK4x4, &(p)->swap_comp[offset])
+ #define EntryWK4x4(p, offset) test_bit(CS_WK4x4, &(p)->swap_comp[offset])
+
+ #define SetEntryWKdm(p, offset) set_bit(CS_WKdm, &(p)->swap_comp[offset])
+ #define ClearEntryWKdm(p, offset) clear_bit(CS_WKdm, &(p)->swap_comp[offset])
+ #define EntryWKdm(p, offset) test_bit(CS_WKdm, &(p)->swap_comp[offset])
+
+ #define SetEntryCompressed(p, offset) set_bit(CS_compressed, &(p)->swap_comp[offset])
+ #define ClearEntryCompressed(p, offset) clear_bit(CS_compressed, &(p)->swap_comp[offset])
+ #define EntryCompressed(p, offset) test_bit(CS_compressed, &(p)->swap_comp[offset])
#endif
+
+ #endif
+
Index: swap.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/swap.h,v
retrieving revision 1.12
retrieving revision 1.13
diff -C2 -r1.12 -r1.13
*** swap.h 2001/12/12 20:45:46 1.12
--- swap.h 2001/12/13 19:12:57 1.13
***************
*** 223,250 ****
extern void shmem_unuse(swp_entry_t entry, struct page *page);
- #ifdef CONFIG_COMP_SWAP
- void set_comp_swp_entry(swp_entry_t, int, int);
- int swap_compressed(swp_entry_t);
- unsigned short swap_algorithm(swp_entry_t);
-
-
- #define CS_WKdm 0
- #define CS_WK4x4 1
-
- #define CS_compressed 15
-
- #define SetEntryWK4x4(p, offset) set_bit(CS_WK4x4, &(p)->swap_comp[offset])
- #define ClearEntryWK4x4(p, offset) clear_bit(CS_WK4x4, &(p)->swap_comp[offset])
- #define EntryWK4x4(p, offset) test_bit(CS_WK4x4, &(p)->swap_comp[offset])
-
- #define SetEntryWKdm(p, offset) set_bit(CS_WKdm, &(p)->swap_comp[offset])
- #define ClearEntryWKdm(p, offset) clear_bit(CS_WKdm, &(p)->swap_comp[offset])
- #define EntryWKdm(p, offset) test_bit(CS_WKdm, &(p)->swap_comp[offset])
-
- #define SetEntryCompressed(p, offset) set_bit(CS_compressed, &(p)->swap_comp[offset])
- #define ClearEntryCompressed(p, offset) clear_bit(CS_compressed, &(p)->swap_comp[offset])
- #define EntryCompressed(p, offset) test_bit(CS_compressed, &(p)->swap_comp[offset])
- #endif
-
#endif /* __KERNEL__*/
--- 223,226 ----
Index: sysctl.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/sysctl.h,v
retrieving revision 1.2
retrieving revision 1.3
diff -C2 -r1.2 -r1.3
*** sysctl.h 2001/12/12 20:45:46 1.2
--- sysctl.h 2001/12/13 19:12:57 1.3
***************
*** 142,160 ****
VM_PAGE_CLUSTER=10, /* int: set number of pages to swap together */
VM_MIN_READAHEAD=12, /* Min file readahead */
- #ifdef CONFIG_COMP_CACHE
VM_MAX_READAHEAD=13, /* Max file readahead */
VM_CTL_COMP_CACHE=14
- #else
- VM_MAX_READAHEAD=13 /* Max file readahead */
- #endif
};
-
- #ifdef CONFIG_COMP_CACHE
- enum
- {
- CC_SIZE=1,
- CC_ALGORITHM=2
- };
- #endif
/* CTL_NET names: */
--- 142,148 ----
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-12 20:45:51
|
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv17791/mm
Modified Files:
Makefile memory.c mmap.c oom_kill.c page_alloc.c shmem.c
swap_state.c swapfile.c vmscan.c
Removed Files:
filemap.c
Log Message:
- 0.20pre2 version updated from 2.4.10 to 2.4.16.
- Code was rewritten in swapfile.c to work with the new swap file functions
(swap_free, swap_duplicate, swap_info_get, swap_info_put, etc).
Index: Makefile
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/Makefile,v
retrieving revision 1.4
retrieving revision 1.5
diff -C2 -r1.4 -r1.5
*** Makefile 2001/08/17 18:35:11 1.4
--- Makefile 2001/12/12 20:45:46 1.5
***************
*** 10,14 ****
O_TARGET := mm.o
! export-objs := shmem.o
obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
--- 10,14 ----
O_TARGET := mm.o
! export-objs := shmem.o filemap.o
obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
Index: memory.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v
retrieving revision 1.12
retrieving revision 1.13
diff -C2 -r1.12 -r1.13
*** memory.c 2001/09/29 20:30:14 1.12
--- memory.c 2001/12/12 20:45:46 1.13
***************
*** 82,94 ****
if ((!VALID_PAGE(page)) || PageReserved(page))
return;
! /*
! * free_page() used to be able to clear swap cache
! * entries. We may now have to do it manually.
! */
! if (page->mapping) {
! if (pte_dirty(pte))
! set_page_dirty(page);
! }
!
free_page_and_swap_cache(page);
}
--- 82,87 ----
if ((!VALID_PAGE(page)) || PageReserved(page))
return;
! if (pte_dirty(pte))
! set_page_dirty(page);
free_page_and_swap_cache(page);
}
***************
*** 188,192 ****
unsigned long address = vma->vm_start;
unsigned long end = vma->vm_end;
! unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
src_pgd = pgd_offset(src, address)-1;
--- 181,185 ----
unsigned long address = vma->vm_start;
unsigned long end = vma->vm_end;
! unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE;
src_pgd = pgd_offset(src, address)-1;
***************
*** 326,330 ****
continue;
if (pte_present(pte)) {
! freed ++;
/* This will eventually call __free_pte on the pte. */
tlb_remove_page(tlb, ptep, address + offset);
--- 319,325 ----
continue;
if (pte_present(pte)) {
! struct page *page = pte_page(pte);
! if (VALID_PAGE(page) && !PageReserved(page))
! freed ++;
/* This will eventually call __free_pte on the pte. */
tlb_remove_page(tlb, ptep, address + offset);
***************
*** 333,337 ****
remove_pte_vswap(ptep);
#endif
! swap_free(pte_to_swp_entry(pte));
pte_clear(ptep);
}
--- 328,332 ----
remove_pte_vswap(ptep);
#endif
! free_swap_and_cache(pte_to_swp_entry(pte));
pte_clear(ptep);
}
***************
*** 535,539 ****
if (map) {
flush_dcache_page(map);
! atomic_inc(&map->count);
} else
printk (KERN_INFO "Mapped page missing [%d]\n", i);
--- 530,534 ----
if (map) {
flush_dcache_page(map);
! page_cache_get(map);
} else
printk (KERN_INFO "Mapped page missing [%d]\n", i);
***************
*** 603,607 ****
if (iobuf->locked)
UnlockPage(map);
! __free_page(map);
}
}
--- 598,602 ----
if (iobuf->locked)
UnlockPage(map);
! page_cache_release(map);
}
}
***************
*** 924,928 ****
* and potentially makes it more efficient.
*
! * We hold the mm semaphore and the page_table_lock on entry and exit.
*/
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
--- 919,924 ----
* and potentially makes it more efficient.
*
! * We hold the mm semaphore and the page_table_lock on entry and exit
! * with the page_table_lock released.
*/
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
***************
*** 934,972 ****
if (!VALID_PAGE(old_page))
goto bad_wp_page;
!
! /*
! * We can avoid the copy if:
! * - we're the only user (count == 1)
! * - the only other user is the swap cache,
! * and the only swap cache user is itself,
! * in which case we can just continue to
! * use the same swap cache (it will be
! * marked dirty).
! */
! switch (page_count(old_page)) {
! int can_reuse;
! case 3:
! if (!old_page->buffers)
! break;
! /* FallThrough */
! case 2:
! if (!PageSwapCache(old_page))
! break;
! if (TryLockPage(old_page))
! break;
! /* Recheck swapcachedness once the page is locked */
! can_reuse = exclusive_swap_page(old_page);
! if (can_reuse)
! delete_from_swap_cache(old_page);
! UnlockPage(old_page);
! if (!can_reuse)
! break;
! /* FallThrough */
! case 1:
! if (PageReserved(old_page))
! break;
! flush_cache_page(vma, address);
! establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
! return 1; /* Minor fault */
}
--- 930,943 ----
if (!VALID_PAGE(old_page))
goto bad_wp_page;
!
! if (!TryLockPage(old_page)) {
! int reuse = can_share_swap_page(old_page);
! unlock_page(old_page);
! if (reuse) {
! flush_cache_page(vma, address);
! establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
! spin_unlock(&mm->page_table_lock);
! return 1; /* Minor fault */
! }
}
***************
*** 981,985 ****
goto no_mem;
copy_cow_page(old_page,new_page,address);
- page_cache_release(old_page);
/*
--- 952,955 ----
***************
*** 991,1007 ****
++mm->rss;
break_cow(vma, new_page, address, page_table);
/* Free the old page.. */
new_page = old_page;
}
page_cache_release(new_page);
return 1; /* Minor fault */
bad_wp_page:
printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
return -1;
no_mem:
page_cache_release(old_page);
- spin_lock(&mm->page_table_lock);
return -1;
}
--- 961,980 ----
++mm->rss;
break_cow(vma, new_page, address, page_table);
+ lru_cache_add(new_page);
/* Free the old page.. */
new_page = old_page;
}
+ spin_unlock(&mm->page_table_lock);
page_cache_release(new_page);
+ page_cache_release(old_page);
return 1; /* Minor fault */
bad_wp_page:
+ spin_unlock(&mm->page_table_lock);
printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
return -1;
no_mem:
page_cache_release(old_page);
return -1;
}
***************
*** 1108,1115 ****
num = valid_swaphandles(entry, &offset);
for (i = 0; i < num; offset++, i++) {
- /* Don't block on I/O for read-ahead */
- if (atomic_read(&nr_async_pages) >=
- pager_daemon.swap_cluster << page_cluster)
- break;
/* Ok, do the async read-ahead now */
#ifdef CONFIG_COMP_CACHE
--- 1081,1084 ----
***************
*** 1128,1132 ****
/*
! * We hold the mm semaphore and the page_table_lock on entry and exit.
*/
static int do_swap_page(struct mm_struct * mm,
--- 1097,1102 ----
/*
! * We hold the mm semaphore and the page_table_lock on entry and
! * should release the pagetable lock on exit..
*/
static int do_swap_page(struct mm_struct * mm,
***************
*** 1151,1167 ****
#else
page = lookup_swap_cache(entry);
#endif
if (!page) {
- lock_kernel();
swapin_readahead(entry);
page = read_swap_cache_async(entry);
- unlock_kernel();
if (!page) {
- spin_lock(&mm->page_table_lock);
/*
* Back out if somebody else faulted in this pte while
* we released the page table lock.
*/
! return pte_same(*page_table, orig_pte) ? -1 : 1;
}
--- 1121,1143 ----
#else
page = lookup_swap_cache(entry);
+
+ if ((entry.val & 0x2) == 1) {
+ printk("entry %08lx\n", entry.val);
+ BUG();
+ }
#endif
if (!page) {
swapin_readahead(entry);
page = read_swap_cache_async(entry);
if (!page) {
/*
* Back out if somebody else faulted in this pte while
* we released the page table lock.
*/
! int retval;
! spin_lock(&mm->page_table_lock);
! retval = pte_same(*page_table, orig_pte) ? -1 : 1;
! spin_unlock(&mm->page_table_lock);
! return retval;
}
***************
*** 1170,1178 ****
}
- /*
- * Freeze the "shared"ness of the page, ie page_count + swap_count.
- * Must lock page before transferring our swap count to already
- * obtained page count.
- */
lock_page(page);
--- 1146,1149 ----
***************
*** 1183,1187 ****
spin_lock(&mm->page_table_lock);
if (!pte_same(*page_table, orig_pte)) {
! UnlockPage(page);
page_cache_release(page);
#ifdef CONFIG_COMP_CACHE
--- 1154,1159 ----
spin_lock(&mm->page_table_lock);
if (!pte_same(*page_table, orig_pte)) {
! spin_unlock(&mm->page_table_lock);
! unlock_page(page);
page_cache_release(page);
#ifdef CONFIG_COMP_CACHE
***************
*** 1220,1241 ****
#endif
- /* The page isn't present yet, go ahead with the fault. */
- mm->rss++;
- pte = mk_pte(page, vma->vm_page_prot);
-
#ifdef CONFIG_COMP_CACHE
remove_pte_vswap(page_table);
unlock_vswap(entry);
#endif
swap_free(entry);
! mark_page_accessed(page);
! if (exclusive_swap_page(page)) {
! if (vma->vm_flags & VM_WRITE)
! pte = pte_mkwrite(pte);
! pte = pte_mkdirty(pte);
! delete_from_swap_cache(page);
! }
! UnlockPage(page);
flush_page_to_ram(page);
flush_icache_page(vma, page);
--- 1192,1212 ----
#endif
#ifdef CONFIG_COMP_CACHE
remove_pte_vswap(page_table);
unlock_vswap(entry);
#endif
+
+ /* The page isn't present yet, go ahead with the fault. */
+
swap_free(entry);
! if (vm_swap_full())
! remove_exclusive_swap_page(page);
+ mm->rss++;
+ pte = mk_pte(page, vma->vm_page_prot);
+ if (write_access && can_share_swap_page(page))
+ pte = pte_mkdirty(pte_mkwrite(pte));
+ unlock_page(page);
+
flush_page_to_ram(page);
flush_icache_page(vma, page);
***************
*** 1244,1247 ****
--- 1215,1219 ----
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, pte);
+ spin_unlock(&mm->page_table_lock);
return ret;
}
***************
*** 1274,1277 ****
--- 1246,1250 ----
if (!pte_none(*page_table)) {
page_cache_release(page);
+ spin_unlock(&mm->page_table_lock);
return 1;
}
***************
*** 1279,1282 ****
--- 1252,1256 ----
flush_page_to_ram(page);
entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+ lru_cache_add(page);
}
***************
*** 1285,1292 ****
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, entry);
return 1; /* Minor fault */
no_mem:
- spin_lock(&mm->page_table_lock);
return -1;
}
--- 1259,1266 ----
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, entry);
+ spin_unlock(&mm->page_table_lock);
return 1; /* Minor fault */
no_mem:
return -1;
}
***************
*** 1302,1306 ****
*
* This is called with the MM semaphore held and the page table
! * spinlock held.
*/
static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
--- 1276,1280 ----
*
* This is called with the MM semaphore held and the page table
! * spinlock held. Exit with the spinlock released.
*/
static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
***************
*** 1314,1329 ****
spin_unlock(&mm->page_table_lock);
! /*
! * The third argument is "no_share", which tells the low-level code
! * to copy, not share the page even if sharing is possible. It's
! * essentially an early COW detection.
! */
! new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
- spin_lock(&mm->page_table_lock);
if (new_page == NULL) /* no page was available -- SIGBUS */
return 0;
if (new_page == NOPAGE_OOM)
return -1;
/*
* This silly early PAGE_DIRTY setting removes a race
--- 1288,1312 ----
spin_unlock(&mm->page_table_lock);
! new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0);
if (new_page == NULL) /* no page was available -- SIGBUS */
return 0;
if (new_page == NOPAGE_OOM)
return -1;
+
+ /*
+ * Should we do an early C-O-W break?
+ */
+ if (write_access && !(vma->vm_flags & VM_SHARED)) {
+ struct page * page = alloc_page(GFP_HIGHUSER);
+ if (!page)
+ return -1;
+ copy_highpage(page, new_page);
+ page_cache_release(new_page);
+ lru_cache_add(page);
+ new_page = page;
+ }
+
+ spin_lock(&mm->page_table_lock);
/*
* This silly early PAGE_DIRTY setting removes a race
***************
*** 1342,1354 ****
flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot);
! if (write_access) {
entry = pte_mkwrite(pte_mkdirty(entry));
- } else if (page_count(new_page) > 1 &&
- !(vma->vm_flags & VM_SHARED))
- entry = pte_wrprotect(entry);
set_pte(page_table, entry);
} else {
/* One of our sibling threads was faster, back out. */
page_cache_release(new_page);
return 1;
}
--- 1325,1335 ----
flush_icache_page(vma, new_page);
entry = mk_pte(new_page, vma->vm_page_prot);
! if (write_access)
entry = pte_mkwrite(pte_mkdirty(entry));
set_pte(page_table, entry);
} else {
/* One of our sibling threads was faster, back out. */
page_cache_release(new_page);
+ spin_unlock(&mm->page_table_lock);
return 1;
}
***************
*** 1356,1359 ****
--- 1337,1341 ----
/* no need to invalidate: a not-present page shouldn't be cached */
update_mmu_cache(vma, address, entry);
+ spin_unlock(&mm->page_table_lock);
return 2; /* Major fault */
}
***************
*** 1376,1379 ****
--- 1358,1364 ----
* so we don't need to worry about a page being suddenly been added into
* our VM.
+ *
+ * We enter with the pagetable spinlock held, we are supposed to
+ * release it when done.
*/
static inline int handle_pte_fault(struct mm_struct *mm,
***************
*** 1403,1406 ****
--- 1388,1392 ----
entry = pte_mkyoung(entry);
establish_pte(vma, address, pte, entry);
+ spin_unlock(&mm->page_table_lock);
return 1;
}
***************
*** 1412,1416 ****
unsigned long address, int write_access)
{
- int ret = -1;
pgd_t *pgd;
pmd_t *pmd;
--- 1398,1401 ----
***************
*** 1429,1436 ****
pte_t * pte = pte_alloc(mm, pmd, address);
if (pte)
! ret = handle_pte_fault(mm, vma, address, write_access, pte);
}
spin_unlock(&mm->page_table_lock);
! return ret;
}
--- 1414,1421 ----
pte_t * pte = pte_alloc(mm, pmd, address);
if (pte)
! return handle_pte_fault(mm, vma, address, write_access, pte);
}
spin_unlock(&mm->page_table_lock);
! return -1;
}
Index: mmap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/mmap.c,v
retrieving revision 1.3
retrieving revision 1.4
diff -C2 -r1.3 -r1.4
*** mmap.c 2001/09/29 20:30:14 1.3
--- mmap.c 2001/12/12 20:45:46 1.4
***************
*** 72,77 ****
return 1;
! free = atomic_read(&buffermem_pages);
! free += atomic_read(&page_cache_size);
free += nr_free_pages();
free += nr_swap_pages;
--- 72,77 ----
return 1;
! /* The page cache contains buffer pages these days.. */
! free = atomic_read(&page_cache_size);
free += nr_free_pages();
free += nr_swap_pages;
***************
*** 81,85 ****
* and in the swapper space. At the same time, this compensates
* for the swap-space over-allocation (ie "nr_swap_pages" being
! * too small.
*/
free += swapper_space.nrpages;
--- 81,85 ----
* and in the swapper space. At the same time, this compensates
* for the swap-space over-allocation (ie "nr_swap_pages" being
! * too small.
*/
free += swapper_space.nrpages;
Index: oom_kill.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/oom_kill.c,v
retrieving revision 1.4
retrieving revision 1.5
diff -C2 -r1.4 -r1.5
*** oom_kill.c 2001/09/29 20:30:14 1.4
--- oom_kill.c 2001/12/12 20:45:46 1.5
***************
*** 21,28 ****
#include <linux/swapctl.h>
#include <linux/timex.h>
-
#ifdef CONFIG_COMP_CACHE
#include <linux/comp_cache.h>
#endif
/* #define DEBUG */
--- 21,28 ----
#include <linux/swapctl.h>
#include <linux/timex.h>
#ifdef CONFIG_COMP_CACHE
#include <linux/comp_cache.h>
#endif
+
/* #define DEBUG */
***************
*** 154,158 ****
*/
p->counter = 5 * HZ;
! p->flags |= PF_MEMALLOC;
/* This process has hardware access, be more careful. */
--- 154,158 ----
*/
p->counter = 5 * HZ;
! p->flags |= PF_MEMALLOC | PF_MEMDIE;
/* This process has hardware access, be more careful. */
***************
*** 172,176 ****
* don't have to be perfect here, we just have to be good.
*/
! void oom_kill(void)
{
struct task_struct *p = select_bad_process(), *q;
--- 172,176 ----
* don't have to be perfect here, we just have to be good.
*/
! static void oom_kill(void)
{
struct task_struct *p = select_bad_process(), *q;
***************
*** 195,197 ****
--- 195,248 ----
schedule();
return;
+ }
+
+ /**
+ * out_of_memory - is the system out of memory?
+ */
+ void out_of_memory(void)
+ {
+ static unsigned long first, last, count;
+ unsigned long now, since;
+
+ /*
+ * Enough swap space left? Not OOM.
+ */
+ if (nr_swap_pages > 0)
+ return;
+
+ now = jiffies;
+ since = now - last;
+ last = now;
+
+ /*
+ * If it's been a long time since last failure,
+ * we're not oom.
+ */
+ last = now;
+ if (since > 5*HZ)
+ goto reset;
+
+ /*
+ * If we haven't tried for at least one second,
+ * we're not really oom.
+ */
+ since = now - first;
+ if (since < HZ)
+ return;
+
+ /*
+ * If we have gotten only a few failures,
+ * we're not really oom.
+ */
+ if (++count < 10)
+ return;
+
+ /*
+ * Ok, really out of memory. Kill something.
+ */
+ oom_kill();
+
+ reset:
+ first = now;
+ count = 0;
}
Index: page_alloc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/page_alloc.c,v
retrieving revision 1.11
retrieving revision 1.12
diff -C2 -r1.11 -r1.12
*** page_alloc.c 2001/10/01 22:43:59 1.11
--- page_alloc.c 2001/12/12 20:45:46 1.12
***************
*** 28,32 ****
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
! static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 32, 128, 128, };
static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
--- 28,32 ----
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
! static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, };
static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
***************
*** 85,94 ****
if (PageLocked(page))
BUG();
! if (PageDecrAfter(page))
BUG();
if (PageActive(page))
BUG();
- if (PageInactive(page))
- BUG();
#ifdef CONFIG_COMP_SWAP
page->flags &= ~(1<<PG_comp_swap_cache);
--- 85,92 ----
if (PageLocked(page))
BUG();
! if (PageLRU(page))
BUG();
if (PageActive(page))
BUG();
#ifdef CONFIG_COMP_SWAP
page->flags &= ~(1<<PG_comp_swap_cache);
***************
*** 147,158 ****
local_freelist:
! /*
! * This is a little subtle: if the allocation order
! * wanted is major than zero we'd better take all the pages
! * local since we must deal with fragmentation too and we
! * can't rely on the nr_local_pages information.
! */
! if (current->nr_local_pages && !current->allocation_order)
goto back_local_freelist;
	list_add(&page->list, &current->local_pages);
--- 145,152 ----
local_freelist:
! if (current->nr_local_pages)
goto back_local_freelist;
+ if (in_interrupt())
+ goto back_local_freelist;
	list_add(&page->list, &current->local_pages);
***************
*** 216,221 ****
set_page_count(page, 1);
if (BAD_RANGE(zone,page))
BUG();
! DEBUG_LRU_PAGE(page);
return page;
}
--- 210,218 ----
set_page_count(page, 1);
if (BAD_RANGE(zone,page))
+ BUG();
+ if (PageLRU(page))
BUG();
! if (PageActive(page))
! BUG();
return page;
}
***************
*** 282,291 ****
if (PageLocked(page))
BUG();
! if (PageDecrAfter(page))
BUG();
if (PageActive(page))
BUG();
- if (PageInactive(page))
- BUG();
if (PageDirty(page))
BUG();
--- 279,286 ----
if (PageLocked(page))
BUG();
! if (PageLRU(page))
BUG();
if (PageActive(page))
BUG();
if (PageDirty(page))
BUG();
***************
*** 312,321 ****
}
- static inline unsigned long zone_free_pages(zone_t * zone, unsigned int order)
- {
- long free = zone->free_pages - (1UL << order);
- return free >= 0 ? free : 0;
- }
-
/*
* This is the 'heart' of the zoned buddy allocator:
--- 307,310 ----
***************
*** 323,326 ****
--- 312,316 ----
struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist)
{
+ unsigned long min;
zone_t **zone, * classzone;
struct page * page;
***************
*** 329,332 ****
--- 319,323 ----
zone = zonelist->zones;
classzone = *zone;
+ min = 1UL << order;
for (;;) {
zone_t *z = *(zone++);
***************
*** 334,338 ****
break;
! if (zone_free_pages(z, order) > z->pages_low) {
page = rmqueue(z, order);
if (page)
--- 325,330 ----
break;
! min += z->pages_low;
! if (z->free_pages > min) {
page = rmqueue(z, order);
if (page)
***************
*** 347,360 ****
zone = zonelist->zones;
for (;;) {
! unsigned long min;
zone_t *z = *(zone++);
if (!z)
break;
! min = z->pages_min;
if (!(gfp_mask & __GFP_WAIT))
! min >>= 2;
! if (zone_free_pages(z, order) > min) {
page = rmqueue(z, order);
if (page)
--- 339,354 ----
zone = zonelist->zones;
+ min = 1UL << order;
for (;;) {
! unsigned long local_min;
zone_t *z = *(zone++);
if (!z)
break;
! local_min = z->pages_min;
if (!(gfp_mask & __GFP_WAIT))
! local_min >>= 2;
! min += local_min;
! if (z->free_pages > min) {
page = rmqueue(z, order);
if (page)
***************
*** 365,369 ****
/* here we're in the low on memory slow path */
! if (current->flags & PF_MEMALLOC) {
zone = zonelist->zones;
for (;;) {
--- 359,364 ----
/* here we're in the low on memory slow path */
! rebalance:
! if (current->flags & (PF_MEMALLOC | PF_MEMDIE)) {
zone = zonelist->zones;
for (;;) {
***************
*** 378,383 ****
return NULL;
}
- rebalance:
page = balance_classzone(classzone, gfp_mask, order, &freed);
if (page)
--- 373,381 ----
return NULL;
}
+
+ /* Atomic allocations - we can't balance anything */
+ if (!(gfp_mask & __GFP_WAIT))
+ return NULL;
page = balance_classzone(classzone, gfp_mask, order, &freed);
if (page)
***************
*** 385,422 ****
zone = zonelist->zones;
! if (likely(freed)) {
! for (;;) {
! zone_t *z = *(zone++);
! if (!z)
! break;
!
! if (zone_free_pages(z, order) > z->pages_min) {
! page = rmqueue(z, order);
! if (page)
! return page;
! }
! }
! goto rebalance;
! } else {
! /*
! * Check that no other task is been killed meanwhile,
! * in such a case we can succeed the allocation.
! */
! for (;;) {
! zone_t *z = *(zone++);
! if (!z)
! break;
! if (zone_free_pages(z, order) > z->pages_high) {
! page = rmqueue(z, order);
! if (page)
! return page;
! }
}
}
! printk(KERN_NOTICE "__alloc_pages: %u-order allocation failed (gfp=0x%x/%i) from %p\n",
! order, gfp_mask, !!(current->flags & PF_MEMALLOC), __builtin_return_address(0));
! return NULL;
}
--- 383,409 ----
zone = zonelist->zones;
! min = 1UL << order;
! for (;;) {
! zone_t *z = *(zone++);
! if (!z)
! break;
! min += z->pages_min;
! if (z->free_pages > min) {
! page = rmqueue(z, order);
! if (page)
! return page;
}
}
! /* Don't let big-order allocations loop */
! if (order > 3)
! return NULL;
!
! /* Yield for kswapd, and try again */
! current->policy |= SCHED_YIELD;
! __set_current_state(TASK_RUNNING);
! schedule();
! goto rebalance;
}
***************
*** 447,450 ****
--- 434,446 ----
}
+ void page_cache_release(struct page *page)
+ {
+ if (!PageReserved(page) && put_page_testzero(page)) {
+ if (PageLRU(page))
+ lru_cache_del(page);
+ __free_pages_ok(page, 0);
+ }
+ }
+
void __free_pages(struct page *page, unsigned int order)
{
***************
*** 484,501 ****
pg_data_t *pgdat = pgdat_list;
unsigned int sum = 0;
- zonelist_t *zonelist;
- zone_t **zonep, *zone;
do {
! zonelist = pgdat->node_zonelists + __GFP_HIGHMEM;
! zonep = zonelist->zones;
!
! for (zone = *zonep++; zone; zone = *zonep++)
! sum += zone->free_pages;
pgdat = pgdat->node_next;
} while (pgdat);
! return sum + nr_active_pages + nr_inactive_pages;
}
--- 480,500 ----
pg_data_t *pgdat = pgdat_list;
unsigned int sum = 0;
do {
! zonelist_t *zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
! zone_t **zonep = zonelist->zones;
! zone_t *zone;
!
! for (zone = *zonep++; zone; zone = *zonep++) {
! unsigned long size = zone->size;
! unsigned long high = zone->pages_high;
! if (size > high)
! sum += size - high;
! }
pgdat = pgdat->node_next;
} while (pgdat);
! return sum;
}
***************
*** 514,517 ****
--- 513,518 ----
#endif
+ #define K(x) ((x) << (PAGE_SHIFT-10))
+
/*
* Show free area list (used inside shift_scroll-lock stuff)
***************
*** 523,530 ****
unsigned int order;
unsigned type;
printk("Free pages: %6dkB (%6dkB HighMem)\n",
! nr_free_pages() << (PAGE_SHIFT-10),
! nr_free_highpages() << (PAGE_SHIFT-10));
printk("( Active: %d, inactive: %d, free: %d )\n",
--- 524,547 ----
unsigned int order;
unsigned type;
+ pg_data_t *tmpdat = pgdat;
printk("Free pages: %6dkB (%6dkB HighMem)\n",
! K(nr_free_pages()),
! K(nr_free_highpages()));
!
! while (tmpdat) {
! zone_t *zone;
! for (zone = tmpdat->node_zones;
! zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
! printk("Zone:%s freepages:%6lukB min:%6lukB low:%6lukB "
! "high:%6lukB\n",
! zone->name,
! K(zone->free_pages),
! K(zone->pages_min),
! K(zone->pages_low),
! K(zone->pages_high));
!
! tmpdat = tmpdat->node_next;
! }
printk("( Active: %d, inactive: %d, free: %d )\n",
***************
*** 552,561 ****
}
total += nr * (1 << order);
! printk("%lu*%lukB ", nr,
! (PAGE_SIZE>>10) << order);
}
spin_unlock_irqrestore(&zone->lock, flags);
}
! printk("= %lukB)\n", total * (PAGE_SIZE>>10));
}
--- 569,577 ----
}
total += nr * (1 << order);
! printk("%lu*%lukB ", nr, K(1UL) << order);
}
spin_unlock_irqrestore(&zone->lock, flags);
}
! printk("= %lukB)\n", K(total));
}
Index: shmem.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/shmem.c,v
retrieving revision 1.10
retrieving revision 1.11
diff -C2 -r1.10 -r1.11
*** shmem.c 2001/09/29 20:30:14 1.10
--- shmem.c 2001/12/12 20:45:46 1.11
***************
*** 27,42 ****
#include <linux/string.h>
#include <linux/locks.h>
! #include <asm/smplock.h>
#include <asm/uaccess.h>
-
#ifdef CONFIG_COMP_CACHE
#include <linux/comp_cache.h>
#endif
/* This magic number is used in glibc for posix shared memory */
[...1438 lines suppressed...]
- spin_unlock (&shmem_ilock);
- }
-
-
/*
* shmem_file_setup - get an unlinked file living in shmem fs
--- 1448,1451 ----
***************
*** 1338,1342 ****
return ERR_PTR(-EINVAL);
! if (!vm_enough_memory((size) >> PAGE_SHIFT))
return ERR_PTR(-ENOMEM);
--- 1467,1471 ----
return ERR_PTR(-EINVAL);
! if (!vm_enough_memory((size) >> PAGE_CACHE_SHIFT))
return ERR_PTR(-ENOMEM);
Index: swap_state.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/swap_state.c,v
retrieving revision 1.15
retrieving revision 1.16
diff -C2 -r1.15 -r1.16
*** swap_state.c 2001/09/29 20:30:14 1.15
--- swap_state.c 2001/12/12 20:45:46 1.16
***************
*** 17,21 ****
#include <asm/pgtable.h>
-
#ifdef CONFIG_COMP_CACHE
#include <linux/comp_cache.h>
--- 17,20 ----
***************
*** 29,34 ****
static int swap_writepage(struct page *page)
{
! if (exclusive_swap_page(page)) {
! delete_from_swap_cache(page);
UnlockPage(page);
return 0;
--- 28,32 ----
static int swap_writepage(struct page *page)
{
! if (remove_exclusive_swap_page(page)) {
UnlockPage(page);
return 0;
***************
*** 56,92 ****
#ifdef SWAP_CACHE_INFO
! unsigned long swap_cache_add_total;
! unsigned long swap_cache_del_total;
! unsigned long swap_cache_find_total;
! unsigned long swap_cache_find_success;
void show_swap_cache_info(void)
{
! printk("Swap cache: add %ld, delete %ld, find %ld/%ld\n",
! swap_cache_add_total,
! swap_cache_del_total,
! swap_cache_find_success, swap_cache_find_total);
}
#endif
! void add_to_swap_cache(struct page *page, swp_entry_t entry)
{
- unsigned long flags;
-
- #ifdef SWAP_CACHE_INFO
- swap_cache_add_total++;
- #endif
- if (!PageLocked(page))
- BUG();
if (page->mapping)
BUG();
! /* clear PG_dirty so a subsequent set_page_dirty takes effect */
! flags = page->flags & ~(1 << PG_error | 1 << PG_dirty | 1 << PG_arch_1 | 1 << PG_referenced);
! page->flags = flags | (1 << PG_uptodate);
#ifdef CONFIG_COMP_CACHE
add_swap_cache_page_vswap(page, entry);
#endif
! add_to_page_cache_locked(page, &swapper_space, entry.val);
}
--- 54,103 ----
#ifdef SWAP_CACHE_INFO
! #define INC_CACHE_INFO(x) (swap_cache_info.x++)
!
! static struct {
! unsigned long add_total;
! unsigned long del_total;
! unsigned long find_success;
! unsigned long find_total;
! unsigned long noent_race;
! unsigned long exist_race;
! } swap_cache_info;
void show_swap_cache_info(void)
{
! printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
! swap_cache_info.add_total, swap_cache_info.del_total,
! swap_cache_info.find_success, swap_cache_info.find_total,
! swap_cache_info.noent_race, swap_cache_info.exist_race);
}
+ #else
+ #define INC_CACHE_INFO(x) do { } while (0)
#endif
! int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
if (page->mapping)
BUG();
! if (!swap_duplicate(entry)) {
! INC_CACHE_INFO(noent_race);
! return -ENOENT;
! }
! if (add_to_page_cache_unique(page, &swapper_space, entry.val,
! page_hash(&swapper_space, entry.val)) != 0) {
! swap_free(entry);
! INC_CACHE_INFO(exist_race);
! return -EEXIST;
! }
#ifdef CONFIG_COMP_CACHE
add_swap_cache_page_vswap(page, entry);
#endif
! if (!PageLocked(page))
! BUG();
! if (!PageSwapCache(page))
! BUG();
! INC_CACHE_INFO(add_total);
! return 0;
}
***************
*** 97,108 ****
void __delete_from_swap_cache(struct page *page)
{
- #ifdef SWAP_CACHE_INFO
- swap_cache_del_total++;
- #endif
if (!PageLocked(page))
BUG();
if (!PageSwapCache(page))
BUG();
-
ClearPageDirty(page);
#ifdef CONFIG_COMP_CACHE
--- 108,115 ----
***************
*** 110,113 ****
--- 117,121 ----
#endif
__remove_inode_page(page);
+ INC_CACHE_INFO(del_total);
}
***************
*** 125,130 ****
BUG();
! if (block_flushpage(page, 0))
! lru_cache_del(page);
entry.val = page->index;
--- 133,137 ----
BUG();
! block_flushpage(page, 0);
entry.val = page->index;
***************
*** 154,159 ****
*/
if (PageSwapCache(page) && !TryLockPage(page)) {
! if (exclusive_swap_page(page))
! delete_from_swap_cache(page);
UnlockPage(page);
}
--- 161,165 ----
*/
if (PageSwapCache(page) && !TryLockPage(page)) {
! remove_exclusive_swap_page(page);
UnlockPage(page);
}
***************
*** 171,177 ****
struct page *found;
- #ifdef SWAP_CACHE_INFO
- swap_cache_find_total++;
- #endif
found = find_get_page(&swapper_space, entry.val);
/*
--- 177,180 ----
***************
*** 181,188 ****
* that, but no need to change: we _have_ got the right page.
*/
! #ifdef SWAP_CACHE_INFO
if (found)
! swap_cache_find_success++;
! #endif
return found;
}
--- 184,190 ----
* that, but no need to change: we _have_ got the right page.
*/
! INC_CACHE_INFO(find_total);
if (found)
! INC_CACHE_INFO(find_success);
return found;
}
***************
*** 196,266 ****
struct page * read_swap_cache_async(swp_entry_t entry)
{
! struct page *found_page, *new_page;
! struct page **hash;
!
! /*
! * Look for the page in the swap cache. Since we normally call
! * this only after lookup_swap_cache() failed, re-calling that
! * would confuse the statistics: use __find_get_page() directly.
! */
! hash = page_hash(&swapper_space, entry.val);
! #ifdef CONFIG_COMP_CACHE
! found_page = lookup_comp_cache(entry);
! #else
! found_page = __find_get_page(&swapper_space, entry.val, hash);
! #endif
!
! if (found_page)
! goto out;
!
! new_page = alloc_page(GFP_HIGHUSER);
! if (!new_page)
! goto out; /* Out of memory */
! if (TryLockPage(new_page))
! BUG();
! /*
! * Check the swap cache again, in case we stalled above.
! * swap_list_lock is guarding against races between this check
! * and where the new page is added to the swap cache below.
! * It is also guarding against race where try_to_swap_out
! * allocates entry with get_swap_page then adds to cache.
! */
#ifdef CONFIG_COMP_CACHE
! found_page = lookup_comp_cache(entry);
#else
! swap_list_lock();
! found_page = __find_get_page(&swapper_space, entry.val, hash);
#endif
!
! if (found_page)
! goto out_free_page;
!
! /*
! * Make sure the swap entry is still in use. It could have gone
! * since caller dropped page_table_lock, while allocating page above,
! * or while allocating page in prior call via swapin_readahead.
! */
! if (!swap_duplicate(entry)) /* Account for the swap cache */
! goto out_free_page;
! /*
! * Add it to the swap cache and read its contents.
! */
! add_to_swap_cache(new_page, entry);
! swap_list_unlock();
!
! rw_swap_page(READ, new_page);
#ifdef CONFIG_COMP_SWAP
! if (swap_compressed(entry))
! PageSetCompSwapCache(new_page);
! #endif
! return new_page;
! out_free_page:
! swap_list_unlock();
! UnlockPage(new_page);
! page_cache_release(new_page);
! out:
return found_page;
}
--- 198,253 ----
struct page * read_swap_cache_async(swp_entry_t entry)
{
! struct page *found_page, *new_page = NULL;
! int err;
! do {
! /*
! * First check the swap cache. Since this is normally
! * called after lookup_swap_cache() failed, re-calling
! * that would confuse statistics: use find_get_page()
! * directly.
! */
#ifdef CONFIG_COMP_CACHE
! found_page = lookup_comp_cache(entry);
#else
! found_page = find_get_page(&swapper_space, entry.val);
#endif
! if (found_page)
! break;
! /*
! * Get a new page to read into from swap.
! */
! if (!new_page) {
! new_page = alloc_page(GFP_HIGHUSER);
! if (!new_page)
! break; /* Out of memory */
! }
!
! /*
! * Associate the page with swap entry in the swap cache.
! * May fail (-ENOENT) if swap entry has been freed since
! * our caller observed it. May fail (-EEXIST) if there
! * is already a page associated with this entry in the
! * swap cache: added by a racing read_swap_cache_async,
! * or by try_to_swap_out (or shmem_writepage) re-using
! * the just freed swap entry for an existing page.
! */
! err = add_to_swap_cache(new_page, entry);
! if (!err) {
! /*
! * Initiate read into locked page and return.
! */
! rw_swap_page(READ, new_page);
#ifdef CONFIG_COMP_SWAP
! if (swap_compressed(entry))
! PageSetCompSwapCache(new_page);
! #endif
! return new_page;
! }
! } while (err != -ENOENT);
! if (new_page)
! page_cache_release(new_page);
return found_page;
}
Index: swapfile.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/swapfile.c,v
retrieving revision 1.18
retrieving revision 1.19
diff -C2 -r1.18 -r1.19
*** swapfile.c 2001/09/29 20:30:14 1.18
--- swapfile.c 2001/12/12 20:45:46 1.19
***************
*** 18,26 ****
#include <asm/pgtable.h>
-
#ifdef CONFIG_COMP_CACHE
-
#include <linux/comp_cache.h>
-
#endif
--- 18,23 ----
***************
*** 48,52 ****
unsigned long offset, type;
! if (virtual_swap_address(entry))
BUG();
--- 45,49 ----
unsigned long offset, type;
! if (vswap_address(entry))
BUG();
***************
*** 63,67 ****
goto bad_offset;
if (!p->swap_map[offset])
! goto bad_unused;
if (!compressed) {
--- 60,64 ----
goto bad_offset;
if (!p->swap_map[offset])
! goto bad_unused;
if (!compressed) {
***************
*** 94,107 ****
return;
! bad_entry:
printk("Null entry in swap_compressed\n");
goto out;
! bad_file:
printk("Bad swap file entry (scse) %08lx\n", entry.val);
goto out;
! bad_offset:
printk("Bad swap offset entry %08lx\n", entry.val);
goto out;
! bad_unused:
printk("Unused swap offset entry in swap_compressed %08lx\n", entry.val);
goto out;
--- 91,104 ----
return;
! bad_entry:
printk("Null entry in swap_compressed\n");
goto out;
! bad_file:
printk("Bad swap file entry (scse) %08lx\n", entry.val);
goto out;
! bad_offset:
printk("Bad swap offset entry %08lx\n", entry.val);
goto out;
! bad_unused:
printk("Unused swap offset entry in swap_compressed %08lx\n", entry.val);
goto out;
***************
*** 115,119 ****
int retval = -1;
! if (virtual_swap_address(entry))
BUG();
--- 112,116 ----
int retval = -1;
! if (vswap_address(entry))
BUG();
***************
*** 143,162 ****
if (retval == -1)
BUG();
! out:
return retval;
!
! bad_entry:
printk("Null entry in swap_compressed\n");
goto out;
! bad_file:
printk("Bad swap file entry (swap_algorithm) %08lx\n", entry.val);
goto out;
! bad_offset:
printk("Bad swap offset entry %08lx\n", entry.val);
goto out;
! bad_unused:
printk("Unused swap offset entry in swap_compressed %08lx\n", entry.val);
goto out;
! bad_compressed:
printk("Swap offset entry not compressed %08lx\n", entry.val);
goto out;
--- 140,159 ----
if (retval == -1)
BUG();
! out:
return retval;
!
! bad_entry:
printk("Null entry in swap_compressed\n");
goto out;
! bad_file:
printk("Bad swap file entry (swap_algorithm) %08lx\n", entry.val);
goto out;
! bad_offset:
printk("Bad swap offset entry %08lx\n", entry.val);
goto out;
! bad_unused:
printk("Unused swap offset entry in swap_compressed %08lx\n", entry.val);
goto out;
! bad_compressed:
printk("Swap offset entry not compressed %08lx\n", entry.val);
goto out;
***************
*** 170,174 ****
int retval = 0;
! if (virtual_swap_address(entry))
BUG();
--- 167,171 ----
int retval = 0;
! if (vswap_address(entry))
BUG();
***************
*** 186,206 ****
goto bad_unused;
retval = EntryCompressed(p, offset);
! out:
return retval;
! bad_entry:
printk("Null entry in swap_compressed\n");
goto out;
! bad_file:
printk("Bad swap file entry (swap_compressed) %08lx\n", entry.val);
goto out;
! bad_offset:
printk("Bad swap offset entry %08lx\n", entry.val);
goto out;
! bad_unused:
printk("Unused swap offset entry in swap_compressed %08lx\n", entry.val);
goto out;
}
-
#endif
--- 183,202 ----
goto bad_unused;
retval = EntryCompressed(p, offset);
! out:
return retval;
! bad_entry:
printk("Null entry in swap_compressed\n");
goto out;
! bad_file:
printk("Bad swap file entry (swap_compressed) %08lx\n", entry.val);
goto out;
! bad_offset:
printk("Bad swap offset entry %08lx\n", entry.val);
goto out;
! bad_unused:
printk("Unused swap offset entry in swap_compressed %08lx\n", entry.val);
goto out;
}
#endif
***************
*** 258,263 ****
si->highest_bit = 0;
}
! /* Initial count 1 for user reference + 1 for swap cache */
! si->swap_map[offset] = 2;
nr_swap_pages--;
si->cluster_next = offset+1;
--- 254,258 ----
si->highest_bit = 0;
}
! si->swap_map[offset] = 1;
nr_swap_pages--;
si->cluster_next = offset+1;
***************
*** 269,277 ****
}
- /*
- * Callers of get_swap_page must hold swap_list_lock across the call,
- * and across the following add_to_swap_cache, to guard against races
- * with read_swap_cache_async.
- */
swp_entry_t get_swap_page(void)
{
--- 264,267 ----
***************
*** 282,285 ****
--- 272,276 ----
entry.val = 0; /* Out of memory */
+ swap_list_lock();
type = swap_list.next;
if (type < 0)
***************
*** 317,328 ****
}
out:
return entry;
}
! /*
! * Caller has made sure that the swapdevice corresponding to entry
! * is still around or has not been recycled.
! */
! void swap_free(swp_entry_t entry)
{
struct swap_info_struct * p;
--- 308,316 ----
}
out:
+ swap_list_unlock();
return entry;
}
! static struct swap_info_struct * swap_info_get(swp_entry_t entry)
{
struct swap_info_struct * p;
***************
*** 331,342 ****
if (!entry.val)
goto out;
-
type = SWP_TYPE(entry);
#ifdef CONFIG_COMP_CACHE
! /* virtual swap address */
! if (type == COMP_CACHE_SWP_TYPE) {
! comp_cache_swp_free(entry);
! return;
! }
#endif
if (type >= nr_swapfiles)
--- 319,326 ----
if (!entry.val)
goto out;
type = SWP_TYPE(entry);
#ifdef CONFIG_COMP_CACHE
! if (vswap_address(entry))
! return &swap_info[type];
#endif
if (type >= nr_swapfiles)
***************
*** 354,368 ****
swap_list.next = type;
swap_device_lock(p);
! if (p->swap_map[offset] < SWAP_MAP_MAX) {
#ifdef CONFIG_COMP_CACHE
! if (!--(p->swap_map[offset])) {
#ifdef CONFIG_COMP_SWAP
p->swap_comp[offset] = 0;
#endif
/* let's keep the swap_map[offset] used for
* the case the comp_cache_release() calls
* swap_dup() */
p->swap_map[offset]++;
! if (!comp_cache_release(entry)) {
if (offset < p->lowest_bit)
p->lowest_bit = offset;
--- 338,391 ----
swap_list.next = type;
swap_device_lock(p);
! return p;
!
! bad_free:
! printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
! goto out;
! bad_offset:
! printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
! goto out;
! bad_device:
! printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
! goto out;
! bad_nofile:
! printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
! out:
! return NULL;
! }
!
! static void swap_info_put(struct swap_info_struct * p)
! {
#ifdef CONFIG_COMP_CACHE
! if (vswap_info_struct(p))
! return;
! #endif
! swap_device_unlock(p);
! swap_list_unlock();
! }
!
! static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
! {
! int count;
!
! #ifdef CONFIG_COMP_CACHE
! if (vswap_info_struct(p))
! return comp_cache_swp_free(SWP_ENTRY(COMP_CACHE_SWP_TYPE, offset));
! #endif
! count = p->swap_map[offset];
!
! if (count < SWAP_MAP_MAX) {
! count--;
! p->swap_map[offset] = count;
! if (!count) {
#ifdef CONFIG_COMP_SWAP
p->swap_comp[offset] = 0;
#endif
+ #ifdef CONFIG_COMP_CACHE
/* let's keep the swap_map[offset] used for
* the case the comp_cache_release() calls
* swap_dup() */
p->swap_map[offset]++;
! if (!comp_cache_release(SWP_ENTRY(p - swap_info, offset))) {
if (offset < p->lowest_bit)
p->lowest_bit = offset;
***************
*** 373,377 ****
p->swap_map[offset]--;
#else
- if (!--(p->swap_map[offset])) {
if (offset < p->lowest_bit)
p->lowest_bit = offset;
--- 396,399 ----
***************
*** 382,405 ****
}
}
! swap_device_unlock(p);
! swap_list_unlock();
! out:
! return;
! bad_nofile:
! printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
! goto out;
! bad_device:
! printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
! goto out;
! bad_offset:
! printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
! goto out;
! bad_free:
! printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
! goto out;
}
/*
* The swap entry has been read in advance, and we return 1 to indicate
* that the page has been used or is no longer needed.
--- 404,566 ----
}
}
! return count;
! }
! /*
! * Caller has made sure that the swapdevice corresponding to entry
! * is still around or has not been recycled.
! */
! void swap_free(swp_entry_t entry)
! {
! struct swap_info_struct * p;
!
! p = swap_info_get(entry);
! if (p) {
! swap_entry_free(p, SWP_OFFSET(entry));
! swap_info_put(p);
! }
! }
!
! /*
! * Check if we're the only user of a swap page,
! * when the page is locked.
! */
! static int exclusive_swap_page(struct page *page)
! {
! int retval = 0;
! struct swap_info_struct * p;
! swp_entry_t entry;
!
! entry.val = page->index;
! p = swap_info_get(entry);
! if (p) {
! /* Is the only swap cache user the cache itself? */
! #ifdef CONFIG_COMP_CACHE
! if ((vswap_address(entry) && comp_cache_swp_count(entry) == 1) ||
! (!vswap_address(entry) && p->swap_map[SWP_OFFSET(entry)] == 1)) {
! #else
! if (p->swap_map[SWP_OFFSET(entry)] == 1) {
! #endif
! /* Recheck the page count with the pagecache lock held.. */
! spin_lock(&pagecache_lock);
! if (page_count(page) - !!page->buffers == 2)
! retval = 1;
! spin_unlock(&pagecache_lock);
! }
! swap_info_put(p);
! }
! return retval;
}
/*
+ * We can use this swap cache entry directly
+ * if there are no other references to it.
+ *
+ * Here "exclusive_swap_page()" does the real
+ * work, but we opportunistically check whether
+ * we need to get all the locks first..
+ */
+ int can_share_swap_page(struct page *page)
+ {
+ int retval = 0;
+
+ if (!PageLocked(page))
+ BUG();
+ switch (page_count(page)) {
+ case 3:
+ if (!page->buffers)
+ break;
+ /* Fallthrough */
+ case 2:
+ if (!PageSwapCache(page))
+ break;
+ retval = exclusive_swap_page(page);
+ break;
+ case 1:
+ if (PageReserved(page))
+ break;
+ retval = 1;
+ }
+ return retval;
+ }
+
+ /*
+ * Work out if there are any other processes sharing this
+ * swap cache page. Free it if you can. Return success.
+ */
+ int remove_exclusive_swap_page(struct page *page)
+ {
+ int retval;
+ struct swap_info_struct * p;
+ swp_entry_t entry;
+
+ if (!PageLocked(page))
+ BUG();
+ if (!PageSwapCache(page))
+ return 0;
+ if (page_count(page) - !!page->buffers != 2) /* 2: us + cache */
+ return 0;
+
+ entry.val = page->index;
+ p = swap_info_get(entry);
+ if (!p)
+ return 0;
+
+ /* Is the only swap cache user the cache itself? */
+ retval = 0;
+ #ifdef CONFIG_COMP_CACHE
+ if ((vswap_address(entry) && comp_cache_swp_count(entry) == 1) ||
+ (!vswap_address(entry) && p->swap_map[SWP_OFFSET(entry)] == 1)) {
+ #else
+ if (p->swap_map[SWP_OFFSET(entry)] == 1) {
+ #endif
+ /* Recheck the page count with the pagecache lock held.. */
+ spin_lock(&pagecache_lock);
+ if (page_count(page) - !!page->buffers == 2) {
+ __delete_from_swap_cache(page);
+ SetPageDirty(page);
+ retval = 1;
+ }
+ spin_unlock(&pagecache_lock);
+ }
+ swap_info_put(p);
+
+ if (retval) {
+ block_flushpage(page, 0);
+ swap_free(entry);
+ page_cache_release(page);
+ }
+
+ return retval;
+ }
+
+ /*
+ * Free the swap entry like above, but also try to
+ * free the page cache entry if it is the last user.
+ */
+ void free_swap_and_cache(swp_entry_t entry)
+ {
+ struct swap_info_struct * p;
+ struct page *page = NULL;
+
+ p = swap_info_get(entry);
+ if (p) {
+ if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
+ page = find_trylock_page(&swapper_space, entry.val);
+ swap_info_put(p);
+ }
+ if (page) {
+ page_cache_get(page);
+ /* Only cache user (+us), or swap space full? Free it! */
+ if (page_count(page) == 2 || vm_swap_full()) {
+ delete_from_swap_cache(page);
+ SetPageDirty(page);
+ }
+ UnlockPage(page);
+ page_cache_release(page);
+ }
+ }
+
+ /*
* The swap entry has been read in advance, and we return 1 to indicate
* that the page has been used or is no longer needed.
***************
*** 410,414 ****
* what to do if a write is requested later.
*/
! /* BKL, mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
pte_t *dir, swp_entry_t entry, struct page* page)
--- 571,575 ----
* what to do if a write is requested later.
*/
! /* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
pte_t *dir, swp_entry_t entry, struct page* page)
***************
*** 422,431 ****
* address (entry), remap the pte to the page that has been
* just swapped in */
! if (virtual_swap_address(pte_to_swp_entry(pte))) {
unsigned long offset = SWP_OFFSET(pte_to_swp_entry(pte));
!
if (real_swap_address(offset) != entry.val)
return;
!
/* free the virtual swap entry */
swap_free(pte_to_swp_entry(pte));
--- 583,592 ----
* address (entry), remap the pte to the page that has been
* just swapped in */
! if (vswap_address(pte_to_swp_entry(pte))) {
unsigned long offset = SWP_OFFSET(pte_to_swp_entry(pte));
!
if (real_swap_address(offset) != entry.val)
return;
!
/* free the virtual swap entry */
swap_free(pte_to_swp_entry(pte));
***************
*** 447,451 ****
}
! /* BKL, mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
unsigned long address, unsigned long size, unsigned long offset,
--- 608,612 ----
}
! /* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
unsigned long address, unsigned long size, unsigned long offset,
***************
*** 475,479 ****
}
! /* BKL, mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
unsigned long address, unsigned long size,
--- 636,640 ----
}
! /* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
unsigned long address, unsigned long size,
***************
*** 506,510 ****
}
! /* BKL, mmlist_lock and vma->vm_mm->page_table_lock are held */
static void unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
swp_entry_t entry, struct page* page)
--- 667,671 ----
}
! /* mmlist_lock and vma->vm_mm->page_table_lock are held */
static void unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
swp_entry_t entry, struct page* page)
***************
*** 646,653 ****
/*
* Don't hold on to start_mm if it looks like exiting.
- * Can mmput ever block? if so, then we cannot risk
- * it between deleting the page from the swap cache,
- * and completing the search through mms (and cannot
- * use it to avoid the long hold on mmlist_lock there).
*/
if (atomic_read(&start_mm->mm_users) == 1) {
--- 807,810 ----
***************
*** 658,698 ****
/*
! * Wait for and lock page. Remove it from swap cache
! * so try_to_swap_out won't bump swap count. Mark dirty
! * so try_to_swap_out will preserve it without us having
! * to mark any present ptes as dirty: so we can skip
! * searching processes once swap count has all gone.
*/
lock_page(page);
- #ifdef CONFIG_COMP_SWAP
- if (PageCompSwapCache(page)) {
- struct page *tmp_page = alloc_page(GFP_ATOMIC);
- swp_entry_t real_entry;
-
- real_entry.val = page->index;
-
- if (!tmp_page)
- panic("do_swap_page(): couldn't alloc temp page\n");
-
- if (TryLockPage(tmp_page))
- BUG();
-
- /* copy - page -> tmp_page */
- copy_page(page_address(tmp_page), page_address(page));
-
- /* decompress - tmp_page -> page */
- decompress(swap_algorithm(real_entry), page_address(tmp_page), page_address(page));
-
- UnlockPage(tmp_page);
- page_cache_release(tmp_page);
-
- PageClearCompSwapCache(page);
- }
- #endif
- if (PageSwapCache(page))
- delete_from_swap_cache(page);
- SetPageDirty(page);
- UnlockPage(page);
- flush_page_to_ram(page);
/*
--- 815,827 ----
/*
! * Wait for and lock page. When do_swap_page races with
! * try_to_unuse, do_swap_page can handle the fault much
! * faster than try_to_unuse can locate the entry. This
! * apparently redundant "wait_on_page" lets try_to_unuse
! * defer to do_swap_page in such a case - in some tests,
! * do_swap_page and try_to_unuse repeatedly compete.
*/
+ wait_on_page(page);
lock_page(page);
/*
***************
*** 702,706 ****
*/
swcount = *swap_map;
! if (swcount) {
if (start_mm == &init_mm)
shmem_unuse(entry, page);
--- 831,836 ----
*/
swcount = *swap_map;
! if (swcount > 1) {
! flush_page_to_ram(page);
if (start_mm == &init_mm)
shmem_unuse(entry, page);
***************
*** 708,712 ****
unuse_process(start_mm, entry, page);
}
! if (*swap_map) {
int set_start_mm = (*swap_map >= swcount);
struct list_head *p = &start_mm->mmlist;
--- 838,842 ----
unuse_process(start_mm, entry, page);
}
! if (*swap_map > 1) {
int set_start_mm = (*swap_map >= swcount);
struct list_head *p = &start_mm->mmlist;
***************
*** 715,719 ****
spin_lock(&mmlist_lock);
! while (*swap_map && (p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
swcount = *swap_map;
--- 845,850 ----
spin_lock(&mmlist_lock);
! while (*swap_map > 1 &&
! (p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
swcount = *swap_map;
***************
*** 733,737 ****
start_mm = new_start_mm;
}
- page_cache_release(page);
/*
--- 864,867 ----
***************
*** 752,756 ****
swap_device_lock(si);
nr_swap_pages++;
! *swap_map = 0;
swap_device_unlock(si);
swap_list_unlock();
--- 882,886 ----
swap_device_lock(si);
nr_swap_pages++;
! *swap_map = 1;
swap_device_unlock(si);
swap_list_unlock();
***************
*** 759,762 ****
--- 889,950 ----
/*
+ * If a reference remains (rare), we would like to leave
+ * the page in the swap cache; but try_to_swap_out could
+ * then re-duplicate the entry once we drop page lock,
+ * so we might loop indefinitely; also, that page could
+ * not be swapped out to other storage meanwhile. So:
+ * delete from cache even if there's another reference,
+ * after ensuring that the data has been saved to disk -
+ * since if the reference remains (rarer), it will be
+ * read from disk into another page. Splitting into two
+ * pages would be incorrect if swap supported "shared
+ * private" pages, but they are handled by tmpfs files.
+ * Note shmem_unuse already deleted it from swap cache.
+ */
+ swcount = *swap_map;
+ if ((swcount > 0) != PageSwapCache(page))
+ BUG();
+ if ((swcount > 1) && PageDirty(page)) {
+ rw_swap_page(WRITE, page);
+ lock_page(page);
+ }
+ #ifdef CONFIG_COMP_SWAP
+ if (PageCompSwapCache(page)) {
+ struct page *tmp_page = alloc_page(GFP_ATOMIC);
+ swp_entry_t real_entry;
+
+ real_entry.val = page->index;
+
+ if (!tmp_page)
+ panic("do_swap_page(): couldn't alloc temp page\n");
+
+ if (TryLockPage(tmp_page))
+ BUG();
+
+ /* copy - page -> tmp_page */
+ copy_page(page_address(tmp_page), page_address(page));
+
+ /* decompress - tmp_page -> page */
+ decompress(swap_algorithm(real_entry), page_address(tmp_page), page_address(page));
+
+ UnlockPage(tmp_page);
+ page_cache_release(tmp_page);
+
+ PageClearCompSwapCache(page);
+ }
+ #endif
+ if (PageSwapCache(page))
+ delete_from_swap_cache(page);
+
+ /*
+ * So we could skip searching mms once swap count went
+ * to 1, we did not mark any present ptes as dirty: must
+ * mark page dirty so try_to_swap_out will preserve it.
+ */
+ SetPageDirty(page);
+ UnlockPage(page);
+ page_cache_release(page);
+
+ /*
* Make sure that we aren't completely killing
* interactive performance. Interruptible check on
***************
*** 765,772 ****
if (current->need_resched)
schedule();
- else {
- unlock_kernel();
- lock_kernel();
- }
}
--- 953,956 ----
***************
*** 824,828 ****
--- 1008,1014 ----
p->flags = SWP_USED;
swap_list_unlock();
+ unlock_kernel();
err = try_to_unuse(type);
+ lock_kernel();
if (err) {
/* re-insert swap space back into swap_list */
***************
*** 856,859 ****
--- 1042,1048 ----
swap_map = p->swap_map;
p->swap_map = NULL;
+ #ifdef CONFIG_COMP_SWAP
+ p->swap_comp = NULL;
+ #endif
p->flags = 0;
swap_device_unlock(p);
***************
*** 962,968 ****
p->swap_device = 0;
p->swap_map = NULL;
- #ifdef CONFIG_COMP_SWAP
- p->swap_comp = NULL;
- #endif
p->lowest_bit = 0;
p->highest_bit = 0;
--- 1151,1154 ----
***************
*** 1065,1072 ****
goto bad_swap;
}
-
#ifdef CONFIG_COMP_SWAP
p->swap_comp = vmalloc(maxpages * sizeof(short));
-
if (!p->swap_comp) {
vfree(p->swap_map);
--- 1251,1256 ----
***************
*** 1076,1080 ****
memset(p->swap_comp, 0, maxpages * sizeof(short));
#endif
-
for (i = 1 ; i < maxpages ; i++) {
if (test_bit(i,(char *) swap_header))
--- 1260,1263 ----
***************
*** 1111,1115 ****
goto bad_swap;
}
-
#ifdef CONFIG_COMP_SWAP
if (!(p->swap_comp = vmalloc (maxpages * sizeof(short)))) {
--- 1294,1297 ----
***************
*** 1120,1123 ****
--- 1302,1306 ----
memset(p->swap_comp, 0, maxpages * sizeof(short));
#endif
+
error = 0;
memset(p->swap_map, 0, maxpages * sizeof(short));
***************
*** 1240,1245 ****
#ifdef CONFIG_COMP_CACHE
! /* virtual swap address */
! if (virtual_swap_address(entry)) {
comp_cache_swp_duplicate(entry);
return 1;
--- 1423,1427 ----
#ifdef CONFIG_COMP_CACHE
! if (vswap_address(entry)) {
comp_cache_swp_duplicate(entry);
return 1;
***************
*** 1287,1298 ****
if (!entry.val)
goto bad_entry;
- type = SWP_TYPE(entry);
#ifdef CONFIG_COMP_CACHE
! /* virtual swap address */
! if (type == COMP_CACHE_SWP_TYPE) {
retval = comp_cache_swp_count(entry);
goto out;
}
! #endif
if (type >= nr_swapfiles)
goto bad_file;
--- 1469,1479 ----
if (!entry.val)
goto bad_entry;
#ifdef CONFIG_COMP_CACHE
! if (vswap_address(entry)) {
retval = comp_cache_swp_count(entry);
goto out;
}
! #endif
! type = SWP_TYPE(entry);
if (type >= nr_swapfiles)
goto bad_file;
Index: vmscan.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v
retrieving revision 1.19
retrieving revision 1.20
diff -C2 -r1.19 -r1.20
*** vmscan.c 2001/09/29 20:30:14 1.19
--- vmscan.c 2001/12/12 20:45:46 1.20
***************
*** 8,12 ****
* Removed kswapd_ctl limits, and swap out as many pages as needed
* to bring the system back to f...
[truncated message content] |
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-12 20:45:50
|
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory usw-pr-cvs1:/tmp/cvs-serv17791/include/linux
Modified Files:
comp_cache.h mm.h swap.h sysctl.h
Log Message:
- 0.20pre2 version updated from 2.4.10 to 2.4.16.
- Code was rewritten in swapfile.c to work with the new swap file functions
(swap_free, swap_duplicate, swap_info_get, swap_info_put, etc).
Index: comp_cache.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v
retrieving revision 1.29
retrieving revision 1.30
diff -C2 -r1.29 -r1.30
*** comp_cache.h 2001/10/08 14:56:35 1.29
--- comp_cache.h 2001/12/12 20:45:46 1.30
***************
*** 2,6 ****
* linux/mm/comp_cache.h
*
! * Time-stamp: <2001-10-06 17:17:13 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache.h
*
! * Time-stamp: <2001-12-11 12:56:38 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 349,353 ****
#define vswap_address_available() (!list_empty(&vswap_address_free_head))
! #define virtual_swap_address(entry) (SWP_TYPE(entry) == COMP_CACHE_SWP_TYPE)
#define real_swap_address(offset) (vswap_address[offset]->real_entry.val)
#define vswap_locked(offset) (down_trylock(&vswap_address[offset]->sem))
--- 349,354 ----
#define vswap_address_available() (!list_empty(&vswap_address_free_head))
! #define vswap_info_struct(p) (p == &swap_info[COMP_CACHE_SWP_TYPE])
! #define vswap_address(entry) (SWP_TYPE(entry) == COMP_CACHE_SWP_TYPE)
#define real_swap_address(offset) (vswap_address[offset]->real_entry.val)
#define vswap_locked(offset) (down_trylock(&vswap_address[offset]->sem))
***************
*** 361,365 ****
void comp_cache_swp_duplicate(swp_entry_t);
! void comp_cache_swp_free_generic(swp_entry_t, int);
int comp_cache_swp_count(swp_entry_t);
--- 362,366 ----
void comp_cache_swp_duplicate(swp_entry_t);
! int comp_cache_swp_free_generic(swp_entry_t, int);
int comp_cache_swp_count(swp_entry_t);
Index: mm.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/mm.h,v
retrieving revision 1.8
retrieving revision 1.9
diff -C2 -r1.8 -r1.9
*** mm.h 2001/09/29 20:30:14 1.8
--- mm.h 2001/12/12 20:45:46 1.9
***************
*** 44,48 ****
struct mm_struct * vm_mm; /* The address space we belong to. */
unsigned long vm_start; /* Our start address within vm_mm. */
! unsigned long vm_end; /* Our end address within vm_mm. */
/* linked list of VM areas per task, sorted by address */
--- 44,49 ----
struct mm_struct * vm_mm; /* The address space we belong to. */
unsigned long vm_start; /* Our start address within vm_mm. */
! unsigned long vm_end; /* The first byte after our end address
! within vm_mm. */
/* linked list of VM areas per task, sorted by address */
***************
*** 111,114 ****
--- 112,119 ----
#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
+ /* read ahead limits */
+ extern int vm_min_readahead;
+ extern int vm_max_readahead;
+
/*
* mapping from the currently active vm_flags protection bits (the
***************
*** 126,130 ****
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
! struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
};
--- 131,135 ----
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
! struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused);
};
***************
*** 271,277 ****
#define PG_uptodate 3
#define PG_dirty 4
! #define PG_decr_after 5
! #define PG_active 6
! #define PG_inactive 7
#define PG_slab 8
#define PG_skip 10
--- 276,282 ----
#define PG_uptodate 3
#define PG_dirty 4
! #define PG_unused 5
! #define PG_lru 6
! #define PG_active 7
#define PG_slab 8
#define PG_skip 10
***************
*** 280,292 ****
#define PG_arch_1 13
#define PG_reserved 14
!
#ifdef CONFIG_COMP_CACHE
! #define PG_comp_cache 15
#endif
#ifdef CONFIG_COMP_SWAP
! #define PG_comp_swap_cache 16
#endif
/* Make it prettier to test the above... */
#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
--- 285,298 ----
#define PG_arch_1 13
#define PG_reserved 14
! #define PG_launder 15 /* written out by VM pressure.. */
#ifdef CONFIG_COMP_CACHE
! #define PG_comp_cache 16
#endif
#ifdef CONFIG_COMP_SWAP
! #define PG_comp_swap_cache 17
#endif
/* Make it prettier to test the above... */
+ #define UnlockPage(page) unlock_page(page)
#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
***************
*** 300,311 ****
#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
!
! extern void __set_page_dirty(struct page *);
! static inline void set_page_dirty(struct page * page)
! {
! if (!test_and_set_bit(PG_dirty, &page->flags))
! __set_page_dirty(page);
! }
/*
--- 306,313 ----
#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
! #define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
! #define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
! extern void FASTCALL(set_page_dirty(struct page *));
/*
***************
*** 315,325 ****
* parallel wait_on_page).
*/
- #define UnlockPage(page) do { \
- smp_mb__before_clear_bit(); \
- if (!test_and_clear_bit(PG_locked, &(page)->flags)) BUG(); \
- smp_mb__after_clear_bit(); \
- if (waitqueue_active(&(page)->wait)) \
- wake_up(&(page)->wait); \
- } while (0)
#define PageError(page) test_bit(PG_error, &(page)->flags)
#define SetPageError(page) set_bit(PG_error, &(page)->flags)
--- 317,320 ----
***************
*** 329,335 ****
#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
#define PageTestandClearReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
- #define PageDecrAfter(page) test_bit(PG_decr_after, &(page)->flags)
- #define SetPageDecrAfter(page) set_bit(PG_decr_after, &(page)->flags)
- #define PageTestandClearDecrAfter(page) test_and_clear_bit(PG_decr_after, &(page)->flags)
#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
#define PageSetSlab(page) set_bit(PG_slab, &(page)->flags)
--- 324,327 ----
***************
*** 338,345 ****
#ifdef CONFIG_COMP_CACHE
! #define PageCompCache(page) test_bit(PG_comp_cache, &(page)->flags)
! #define PageSetCompCache(page) set_bit(PG_comp_cache, &(page)->flags)
! #define PageClearCompCache(page) clear_bit(PG_comp_cache, &(page)->flags)
! #define PageTestandSetCompCache(page) test_and_set_bit(PG_comp_cache, &(page)->flags)
#endif
#ifdef CONFIG_COMP_SWAP
--- 330,337 ----
#ifdef CONFIG_COMP_CACHE
! #define PageCompCache(page) test_bit(PG_comp_cache, &(page)->flags)
! #define PageSetCompCache(page) set_bit(PG_comp_cache, &(page)->flags)
! #define PageClearCompCache(page) clear_bit(PG_comp_cache, &(page)->flags)
! #define PageTestandSetCompCache(page) test_and_set_bit(PG_comp_cache, &(page)->flags)
#endif
#ifdef CONFIG_COMP_SWAP
***************
*** 353,364 ****
#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
- #define TestandSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags)
- #define TestandClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
! #define PageInactive(page) test_bit(PG_inactive, &(page)->flags)
! #define SetPageInactive(page) set_bit(PG_inactive, &(page)->flags)
! #define ClearPageInactive(page) clear_bit(PG_inactive, &(page)->flags)
! #define TestandSetPageInactive(page) test_and_set_bit(PG_inactive, &(page)->flags)
! #define TestandClearPageInactive(page) test_and_clear_bit(PG_inactive, &(page)->flags)
#ifdef CONFIG_HIGHMEM
--- 345,352 ----
#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
! #define PageLRU(page) test_bit(PG_lru, &(page)->flags)
! #define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags)
! #define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
#ifdef CONFIG_HIGHMEM
***************
*** 430,434 ****
extern void clear_page_tables(struct mm_struct *, unsigned long, int);
! struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int no_share);
struct file *shmem_file_setup(char * name, loff_t size);
extern void shmem_lock(struct file * file, int lock);
--- 418,423 ----
extern void clear_page_tables(struct mm_struct *, unsigned long, int);
! extern int fail_writepage(struct page *);
! struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
struct file *shmem_file_setup(char * name, loff_t size);
extern void shmem_lock(struct file * file, int lock);
***************
*** 451,454 ****
--- 440,444 ----
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
+ extern int ptrace_check_attach(struct task_struct *task, int kill);
/*
***************
*** 484,501 ****
}
! /*
! * Work out if there are any other processes sharing this
! * swap cache page. Never mind the buffers.
! */
! static inline int exclusive_swap_page(struct page *page)
! {
! if (!PageLocked(page))
! BUG();
! if (!PageSwapCache(page))
! return 0;
! if (page_count(page) - !!page->buffers != 2) /* 2: us + cache */
! return 0;
! return swap_count(page) == 1; /* 1: just cache */
! }
extern void __free_pte(pte_t);
--- 474,479 ----
}
! extern int can_share_swap_page(struct page *);
! extern int remove_exclusive_swap_page(struct page *);
extern void __free_pte(pte_t);
Index: swap.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/swap.h,v
retrieving revision 1.11
retrieving revision 1.12
diff -C2 -r1.11 -r1.12
*** swap.h 2001/10/08 14:56:35 1.11
--- swap.h 2001/12/12 20:45:46 1.12
***************
*** 11,20 ****
#ifdef CONFIG_COMP_CACHE
! /* some architectures may deal with SWP_ENTRY differently, such as
! * UML. Use MAX_SWAPFILES for COMP_CACHE_SWP_TYPE may cause problems,
! * so let's decrease the maximum number in order to use safely the
! * last swap file type (in this case 31) */
! #define MAX_SWAPFILES 31
#else
#define MAX_SWAPFILES 32
--- 11,20 ----
#ifdef CONFIG_COMP_CACHE
! /* Some architectures may deal with SWP_ENTRY differently, such as
! * UML. Using MAX_SWAPFILES for COMP_CACHE_SWP_TYPE may cause
! * problems, so let's decrease the maximum number in order to use
! * safely the last swap file type (in this case 31) */
+ #define MAX_SWAPFILES 31
#else
#define MAX_SWAPFILES 32
***************
*** 93,96 ****
--- 93,100 ----
extern int nr_swap_pages;
+
+ /* Swap 50% full? Release swapcache more aggressively.. */
+ #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
+
extern unsigned int nr_free_pages(void);
extern unsigned int nr_free_buffer_pages(void);
***************
*** 115,122 ****
extern void FASTCALL(lru_cache_del(struct page *));
- extern void FASTCALL(deactivate_page(struct page *));
- extern void FASTCALL(deactivate_page_nolock(struct page *));
extern void FASTCALL(activate_page(struct page *));
- extern void FASTCALL(activate_page_nolock(struct page *));
extern void swap_setup(void);
--- 119,123 ----
***************
*** 133,138 ****
/* linux/mm/swap_state.c */
extern void show_swap_cache_info(void);
! extern void add_to_swap_cache(struct page *, swp_entry_t);
extern void __delete_from_swap_cache(struct page *page);
extern void delete_from_swap_cache(struct page *page);
--- 134,142 ----
/* linux/mm/swap_state.c */
+ #define SWAP_CACHE_INFO
+ #ifdef SWAP_CACHE_INFO
extern void show_swap_cache_info(void);
! #endif
! extern int add_to_swap_cache(struct page *, swp_entry_t);
extern void __delete_from_swap_cache(struct page *page);
extern void delete_from_swap_cache(struct page *page);
***************
*** 142,148 ****
/* linux/mm/oom_kill.c */
! extern void oom_kill(void);
/* linux/mm/swapfile.c */
extern unsigned int nr_swapfiles;
extern struct swap_info_struct swap_info[];
--- 146,153 ----
/* linux/mm/oom_kill.c */
! extern void out_of_memory(void);
/* linux/mm/swapfile.c */
+ extern int total_swap_pages;
extern unsigned int nr_swapfiles;
extern struct swap_info_struct swap_info[];
***************
*** 156,159 ****
--- 161,165 ----
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
+ extern void free_swap_and_cache(swp_entry_t);
struct swap_list_t {
int head; /* head of priority-ordered swapfile list */
***************
*** 164,176 ****
asmlinkage long sys_swapon(const char *, int);
- #define SWAP_CACHE_INFO
-
- #ifdef SWAP_CACHE_INFO
- extern unsigned long swap_cache_add_total;
- extern unsigned long swap_cache_del_total;
- extern unsigned long swap_cache_find_total;
- extern unsigned long swap_cache_find_success;
- #endif
-
extern spinlock_t pagemap_lru_lock;
--- 170,173 ----
***************
*** 183,191 ****
#define DEBUG_LRU_PAGE(page) \
do { \
! if (PageActive(page)) \
! BUG(); \
! if (PageInactive(page)) \
BUG(); \
! if (page_count(page) == 0) \
BUG(); \
} while (0)
--- 180,186 ----
#define DEBUG_LRU_PAGE(page) \
do { \
! if (!PageLRU(page)) \
BUG(); \
! if (PageActive(page)) \
BUG(); \
} while (0)
***************
*** 202,206 ****
do { \
DEBUG_LRU_PAGE(page); \
- SetPageInactive(page); \
list_add(&(page)->lru, &inactive_list); \
nr_inactive_pages++; \
--- 197,200 ----
***************
*** 212,216 ****
ClearPageActive(page); \
nr_active_pages--; \
- DEBUG_LRU_PAGE(page); \
} while (0)
--- 206,209 ----
***************
*** 218,239 ****
do { \
list_del(&(page)->lru); \
- ClearPageInactive(page); \
nr_inactive_pages--; \
- DEBUG_LRU_PAGE(page); \
} while (0)
-
- /*
- * Ugly ugly ugly HACK to make sure the inactive lists
- * don't fill up with unfreeable ramdisk pages. We really
- * want to fix the ramdisk driver to mark its pages as
- * unfreeable instead of using dirty buffer magic, but the
- * next code-change time is when 2.5 is forked...
- */
- #ifndef _LINUX_KDEV_T_H
- #include <linux/kdev_t.h>
- #endif
- #ifndef _LINUX_MAJOR_H
- #include <linux/major.h>
- #endif
extern spinlock_t swaplock;
--- 211,216 ----
Index: sysctl.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/sysctl.h,v
retrieving revision 1.1
retrieving revision 1.2
diff -C2 -r1.1 -r1.2
*** sysctl.h 2001/10/01 22:43:59 1.1
--- sysctl.h 2001/12/12 20:45:46 1.2
***************
*** 63,67 ****
CTL_DEV=7, /* Devices */
CTL_BUS=8, /* Busses */
! CTL_ABI=9 /* Binary emulation */
};
--- 63,68 ----
CTL_DEV=7, /* Devices */
CTL_BUS=8, /* Busses */
! CTL_ABI=9, /* Binary emulation */
! CTL_CPU=10 /* CPU stuff (speed scaling, etc) */
};
***************
*** 122,125 ****
--- 123,127 ----
KERN_S390_USER_DEBUG_LOGGING=51, /* int: dumps of user faults */
KERN_CORE_USES_PID=52, /* int: use core or core.%pid */
+ KERN_TAINTED=53, /* int: various kernel tainted flags */
KERN_CADPID=54, /* int: PID of the process to notify on CAD */
};
***************
*** 138,146 ****
VM_PAGERDAEMON=8, /* struct: Control kswapd behaviour */
VM_PGT_CACHE=9, /* struct: Set page table cache parameters */
- #ifdef CONFIG_COMP_CACHE
VM_PAGE_CLUSTER=10, /* int: set number of pages to swap together */
! VM_CTL_COMP_CACHE=11
#else
! VM_PAGE_CLUSTER=10 /* int: set number of pages to swap together */
#endif
};
--- 140,150 ----
VM_PAGERDAEMON=8, /* struct: Control kswapd behaviour */
VM_PGT_CACHE=9, /* struct: Set page table cache parameters */
VM_PAGE_CLUSTER=10, /* int: set number of pages to swap together */
! VM_MIN_READAHEAD=12, /* Min file readahead */
! #ifdef CONFIG_COMP_CACHE
! VM_MAX_READAHEAD=13, /* Max file readahead */
! VM_CTL_COMP_CACHE=14
#else
! VM_MAX_READAHEAD=13 /* Max file readahead */
#endif
};
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-12 20:45:50
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache
In directory usw-pr-cvs1:/tmp/cvs-serv17791/mm/comp_cache
Modified Files:
aux.c free.c main.c swapin.c swapout.c vswap.c
Log Message:
- 0.20pre2 version updated from 2.4.10 to 2.4.16.
- Code was rewritten in swapfile.c to work with the new swap file functions
(swap_free, swap_duplicate, swap_info_get, swap_info_put, etc).
Index: aux.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/aux.c,v
retrieving revision 1.5
retrieving revision 1.6
diff -C2 -r1.5 -r1.6
*** aux.c 2001/10/01 22:43:59 1.5
--- aux.c 2001/12/12 20:45:46 1.6
***************
*** 2,6 ****
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2001-10-01 18:04:25 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/aux.c
*
! * Time-stamp: <2001-12-11 12:58:43 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 244,248 ****
BUG();
! if (virtual_swap_address(entry))
freeable -= fragment->compressed_size;
--- 244,248 ----
BUG();
! if (vswap_address(entry))
freeable -= fragment->compressed_size;
Index: free.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -C2 -r1.7 -r1.8
*** free.c 2001/10/08 14:56:35 1.7
--- free.c 2001/12/12 20:45:46 1.8
***************
*** 2,6 ****
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2001-10-06 17:07:23 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/free.c
*
! * Time-stamp: <2001-12-12 15:38:59 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 152,156 ****
comp_page->number_of_pages--;
! if (virtual_swap_address((swp_entry_t) { fragment_to_free->index }))
vswap_add_freeable(comp_page, fragment_to_free);
--- 152,156 ----
comp_page->number_of_pages--;
! if (vswap_address((swp_entry_t) { fragment_to_free->index }))
vswap_add_freeable(comp_page, fragment_to_free);
***************
*** 213,216 ****
--- 213,217 ----
assign_address:
+
/* no virtual swap entry with a compressed page */
if (list_empty(&vswap_address_used_head))
***************
*** 306,310 ****
comp_cache_swp_free_generic(old_entry, 0);
! swap_duplicate(entry);
add_to_swap_cache(swap_cache_page, entry);
--- 307,311 ----
comp_cache_swp_free_generic(old_entry, 0);
! //swap_duplicate(entry);
add_to_swap_cache(swap_cache_page, entry);
***************
*** 336,341 ****
BUG();
! if (vswap->count)
BUG();
unlock_vswap(old_entry);
--- 337,346 ----
BUG();
! if (vswap->count) {
! swp_entry_t entry = SWP_ENTRY(31,vswap->offset);
!
! printk("entry: %08lx\n", entry.val);
BUG();
+ }
unlock_vswap(old_entry);
Index: main.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/main.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -C2 -r1.6 -r1.7
*** main.c 2001/10/01 22:43:59 1.6
--- main.c 2001/12/12 20:45:46 1.7
***************
*** 2,6 ****
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2001-10-01 19:11:16 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/main.c
*
! * Time-stamp: <2001-12-10 16:59:47 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 14,17 ****
--- 14,18 ----
#include <linux/comp_cache.h>
#include <linux/init.h>
+ #include <linux/pagemap.h>
#include <linux/WKcommon.h>
***************
*** 74,78 ****
comp_page = get_comp_cache_page(swap_cache_page, aux_comp_size, &fragment);
!
/* if comp_page == NULL, get_comp_cache_page() gave up
* reserving a swap entry for this page, so we should return
--- 75,79 ----
comp_page = get_comp_cache_page(swap_cache_page, aux_comp_size, &fragment);
!
/* if comp_page == NULL, get_comp_cache_page() gave up
* reserving a swap entry for this page, so we should return
***************
*** 85,89 ****
return 0;
}
!
/* TODO: add more buffers for compression. Only one may cause
* many page to be compressed twice */
--- 86,90 ----
return 0;
}
!
/* TODO: add more buffers for compression. Only one may cause
* many page to be compressed twice */
***************
*** 110,115 ****
out:
! UnlockPage(comp_page->page);
/* since there's no IO activity on this page */
UnlockPage(swap_cache_page);
--- 111,119 ----
out:
! if (!comp_page->page)
! BUG();
+ UnlockPage(comp_page->page);
+
/* since there's no IO activity on this page */
UnlockPage(swap_cache_page);
Index: swapin.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapin.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -C2 -r1.7 -r1.8
*** swapin.c 2001/09/29 20:30:14 1.7
--- swapin.c 2001/12/12 20:45:46 1.8
***************
*** 2,6 ****
* linux/mm/comp_cache/swapin.c
*
! * Time-stamp: <2001-09-28 19:56:41 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/swapin.c
*
! * Time-stamp: <2001-12-12 16:04:48 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 126,133 ****
if (!new_page) {
- /* account for the compressed page which will be
- * hopefully added to swap cache in a near future */
- swap_duplicate(entry);
-
UnlockPage(comp_page->page);
--- 126,129 ----
***************
*** 180,191 ****
out:
/* let's free the page which has been allocated but not used */
! if (new_page) {
page_cache_release(new_page);
- swap_free(entry);
- }
if (comp_page && PageLocked(comp_page->page))
BUG();
!
return page;
}
--- 176,185 ----
out:
/* let's free the page which has been allocated but not used */
! if (new_page)
page_cache_release(new_page);
if (comp_page && PageLocked(comp_page->page))
BUG();
!
return page;
}
***************
*** 232,236 ****
page_cache_release(new_page);
wait_on_page(aux_page);
- swap_free(*entry);
}
--- 226,229 ----
***************
*** 244,252 ****
if (!PageCompCache(comp_page->page))
PAGE_BUG(comp_page->page);
!
if (!new_page) {
- /* account for the swap cache page */
- swap_duplicate(*entry);
-
aux_page = comp_page->page;
UnlockPage(comp_page->page);
--- 237,242 ----
if (!PageCompCache(comp_page->page))
PAGE_BUG(comp_page->page);
!
if (!new_page) {
aux_page = comp_page->page;
UnlockPage(comp_page->page);
***************
*** 291,298 ****
}
! if (new_page) {
page_cache_release(new_page);
- swap_free(*entry);
- }
out:
--- 281,286 ----
}
! if (new_page)
page_cache_release(new_page);
out:
Index: swapout.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/swapout.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -C2 -r1.7 -r1.8
*** swapout.c 2001/09/29 20:30:14 1.7
--- swapout.c 2001/12/12 20:45:46 1.8
***************
*** 2,6 ****
* linux/mm/comp_cache/swapout.c
*
! * Time-stamp: <2001-09-28 20:59:25 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/swapout.c
*
! * Time-stamp: <2001-12-11 12:59:11 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 223,227 ****
if (CompFragmentFreed(fragment)) {
! if (virtual_swap_address(entry)) {
comp_page->freeable += fragment->compressed_size;
--- 223,227 ----
if (CompFragmentFreed(fragment)) {
! if (vswap_address(entry)) {
comp_page->freeable += fragment->compressed_size;
***************
*** 237,241 ****
/* does the fragment have a virtual swap address?
* it cannot be swapped out */
! if (virtual_swap_address(entry)) {
if (fragment->offset < first_offset) {
first_offset = fragment->offset;
--- 237,241 ----
/* does the fragment have a virtual swap address?
* it cannot be swapped out */
! if (vswap_address(entry)) {
if (fragment->offset < first_offset) {
first_offset = fragment->offset;
***************
*** 262,266 ****
* lock the page in find_free_swp_buffer */
if (CompFragmentFreed(fragment)) {
! if (virtual_swap_address(entry))
BUG();
--- 262,266 ----
* lock the page in find_free_swp_buffer */
if (CompFragmentFreed(fragment)) {
! if (vswap_address(entry))
BUG();
***************
*** 563,567 ****
last_page = (last_page++)%NUM_MEAN_PAGES;
! if (virtual_swap_address((swp_entry_t) { fragment->index })) {
offset = SWP_OFFSET((swp_entry_t) { fragment->index });
--- 563,567 ----
last_page = (last_page++)%NUM_MEAN_PAGES;
! if (vswap_address((swp_entry_t) { fragment->index })) {
offset = SWP_OFFSET((swp_entry_t) { fragment->index });
Index: vswap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v
retrieving revision 1.8
retrieving revision 1.9
diff -C2 -r1.8 -r1.9
*** vswap.c 2001/10/08 14:56:35 1.8
--- vswap.c 2001/12/12 20:45:46 1.9
***************
*** 2,6 ****
* linux/mm/comp_cache/vswap.c
*
! * Time-stamp: <2001-10-08 11:31:54 rcastro>
*
* Linux Virtual Memory Compressed Cache
--- 2,6 ----
* linux/mm/comp_cache/vswap.c
*
! * Time-stamp: <2001-12-12 16:07:22 rcastro>
*
* Linux Virtual Memory Compressed Cache
***************
*** 70,74 ****
unsigned long offset;
! if (!virtual_swap_address(entry))
return;
--- 70,74 ----
unsigned long offset;
! if (!vswap_address(entry))
return;
***************
*** 82,86 ****
unsigned long offset;
! if (!virtual_swap_address(entry))
return;
--- 82,86 ----
unsigned long offset;
! if (!vswap_address(entry))
return;
***************
*** 129,133 ****
entry = SWP_ENTRY(type, offset);
! if (!virtual_swap_address(entry))
BUG();
--- 129,133 ----
entry = SWP_ENTRY(type, offset);
! if (!vswap_address(entry))
BUG();
***************
*** 141,145 ****
unsigned long offset = SWP_OFFSET(entry);
! if (!virtual_swap_address(entry))
BUG();
--- 141,145 ----
unsigned long offset = SWP_OFFSET(entry);
! if (!vswap_address(entry))
BUG();
***************
*** 150,154 ****
}
! void
comp_cache_swp_free_generic(swp_entry_t entry, int free_fragment)
{
--- 150,154 ----
}
! int
comp_cache_swp_free_generic(swp_entry_t entry, int free_fragment)
{
***************
*** 158,162 ****
struct vswap_address * vswap;
! if (!virtual_swap_address(entry))
BUG();
--- 158,162 ----
struct vswap_address * vswap;
! if (!vswap_address(entry))
BUG();
***************
*** 167,171 ****
if (--vswap->count)
! return;
/* do we have a compressed page for this virtual entry? in the
--- 167,171 ----
if (--vswap->count)
! return vswap->count;
/* do we have a compressed page for this virtual entry? in the
***************
*** 218,221 ****
--- 218,223 ----
list_add(&(vswap->list), &vswap_address_free_head);
+
+ return vswap->count;
}
***************
*** 225,229 ****
unsigned long offset = SWP_OFFSET(entry);
! if (!virtual_swap_address(entry))
BUG();
--- 227,231 ----
unsigned long offset = SWP_OFFSET(entry);
! if (!vswap_address(entry))
BUG();
***************
*** 276,280 ****
entry.val = fragment->index;
! if (!virtual_swap_address(entry))
return;
--- 278,282 ----
entry.val = fragment->index;
! if (!vswap_address(entry))
return;
***************
*** 309,313 ****
return;
! if (!virtual_swap_address(pte_to_swp_entry(*ptep)))
return;
--- 311,315 ----
return;
! if (!vswap_address(pte_to_swp_entry(*ptep)))
return;
***************
*** 342,346 ****
unsigned long offset;
! if (!virtual_swap_address(entry))
return;
--- 344,348 ----
unsigned long offset;
! if (!vswap_address(entry))
return;
***************
*** 351,354 ****
--- 353,361 ----
vswap_address[offset]->swap_cache_page = page;
+
+ if (page != lookup_swap_cache(entry))
+ BUG();
+
+ page_cache_release(page);
}
***************
*** 361,365 ****
entry.val = page->index;
! if (!virtual_swap_address(entry))
return;
--- 368,372 ----
entry.val = page->index;
! if (!vswap_address(entry))
return;
***************
*** 377,381 ****
struct pte_list * tmp_pte_list;
! if (!virtual_swap_address(entry))
return;
--- 384,388 ----
struct pte_list * tmp_pte_list;
! if (!vswap_address(entry))
return;
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-12 20:45:49
|
Update of /cvsroot/linuxcompressed/linux/fs/proc
In directory usw-pr-cvs1:/tmp/cvs-serv17791/fs/proc
Modified Files:
proc_misc.c
Log Message:
- 0.20pre2 version updated from 2.4.10 to 2.4.16.
- Code was rewritten in swapfile.c to work with the new swap file functions
(swap_free, swap_duplicate, swap_info_get, swap_info_put, etc).
Index: proc_misc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/fs/proc/proc_misc.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -C2 -r1.1 -r1.2
*** proc_misc.c 2001/10/01 22:43:59 1.1
--- proc_misc.c 2001/12/12 20:45:46 1.2
***************
*** 36,39 ****
--- 36,40 ----
#include <linux/init.h>
#include <linux/smp_lock.h>
+ #include <linux/seq_file.h>
#ifdef CONFIG_COMP_CACHE
#include <linux/comp_cache.h>
***************
*** 53,70 ****
* wrappers, but this needs further analysis wrt potential overflows.
*/
- extern int get_cpuinfo(char *);
- extern int get_hardware_list(char *);
- extern int get_stram_list(char *);
- #ifdef CONFIG_DEBUG_MALLOC
- extern int get_malloc(char * buffer);
- #endif
#ifdef CONFIG_MODULES
extern int get_module_list(char *);
- extern int get_ksyms_list(char *, char **, off_t, int);
#endif
extern int get_device_list(char *);
extern int get_partition_list(char *, char **, off_t, int);
extern int get_filesystem_list(char *);
- extern int get_filesystem_info(char *);
extern int get_exec_domain_list(char *);
extern int get_irq_list(char *);
--- 54,63 ----
***************
*** 144,147 ****
--- 137,141 ----
struct sysinfo i;
int len;
+ int pg_size ;
/*
***************
*** 152,155 ****
--- 146,151 ----
si_meminfo(&i);
si_swapinfo(&i);
+ pg_size = atomic_read(&page_cache_size) - i.bufferram ;
+
len = sprintf(page, " total: used: free: shared: buffers: cached:\n"
"Mem: %8Lu %8Lu %8Lu %8Lu %8Lu %8Lu\n"
***************
*** 157,161 ****
B(i.totalram), B(i.totalram-i.freeram), B(i.freeram),
B(i.sharedram), B(i.bufferram),
! B(atomic_read(&page_cache_size)), B(i.totalswap),
B(i.totalswap-i.freeswap), B(i.freeswap));
/*
--- 153,157 ----
B(i.totalram), B(i.totalram-i.freeram), B(i.freeram),
B(i.sharedram), B(i.bufferram),
! B(pg_size), B(i.totalswap),
B(i.totalswap-i.freeswap), B(i.freeswap));
/*
***************
*** 183,187 ****
K(i.sharedram),
K(i.bufferram),
! K(atomic_read(&page_cache_size) - swapper_space.nrpages),
K(swapper_space.nrpages),
K(nr_active_pages),
--- 179,183 ----
K(i.sharedram),
K(i.bufferram),
! K(pg_size - swapper_space.nrpages),
K(swapper_space.nrpages),
K(nr_active_pages),
***************
*** 209,246 ****
return proc_calc_metrics(page, start, off, count, eof, len);
}
-
- static int cpuinfo_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
- {
- int len = get_cpuinfo(page);
- return proc_calc_metrics(page, start, off, count, eof, len);
- }
! #ifdef CONFIG_PROC_HARDWARE
! static int hardware_read_proc(char *page, char **start, off_t off,
! int count, int *eof, void *data)
{
! int len = get_hardware_list(page);
! return proc_calc_metrics(page, start, off, count, eof, len);
}
! #endif
!
! #ifdef CONFIG_STRAM_PROC
! static int stram_read_proc(char *page, char **start, off_t off,
! int count, int *eof, void *data)
! {
! int len = get_stram_list(page);
! return proc_calc_metrics(page, start, off, count, eof, len);
! }
! #endif
!
! #ifdef CONFIG_DEBUG_MALLOC
! static int malloc_read_proc(char *page, char **start, off_t off,
! int count, int *eof, void *data)
! {
! int len = get_malloc(page);
! return proc_calc_metrics(page, start, off, count, eof, len);
! }
! #endif
#ifdef CONFIG_MODULES
--- 205,220 ----
return proc_calc_metrics(page, start, off, count, eof, len);
}
! extern struct seq_operations cpuinfo_op;
! static int cpuinfo_open(struct inode *inode, struct file *file)
{
! return seq_open(file, &cpuinfo_op);
}
! static struct file_operations proc_cpuinfo_operations = {
! open: cpuinfo_open,
! read: seq_read,
! llseek: seq_lseek,
! release: seq_release,
! };
#ifdef CONFIG_MODULES
***************
*** 252,262 ****
}
! static int ksyms_read_proc(char *page, char **start, off_t off,
! int count, int *eof, void *data)
{
! int len = get_ksyms_list(page, start, off, count);
! if (len < count) *eof = 1;
! return len;
}
#endif
--- 226,240 ----
}
! extern struct seq_operations ksyms_op;
! static int ksyms_open(struct inode *inode, struct file *file)
{
! return seq_open(file, &ksyms_op);
}
+ static struct file_operations proc_ksyms_operations = {
+ open: ksyms_open,
+ read: seq_read,
+ llseek: seq_lseek,
+ release: seq_release,
+ };
#endif
***************
*** 492,502 ****
}
- static int mounts_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
- {
- int len = get_filesystem_info(page);
- return proc_calc_metrics(page, start, off, count, eof, len);
- }
-
static int execdomains_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
--- 470,473 ----
***************
*** 583,588 ****
--- 554,579 ----
};
+ extern struct seq_operations mounts_op;
+ static int mounts_open(struct inode *inode, struct file *file)
+ {
+ return seq_open(file, &mounts_op);
+ }
+ static struct file_operations proc_mounts_operations = {
+ open: mounts_open,
+ read: seq_read,
+ llseek: seq_lseek,
+ release: seq_release,
+ };
+
struct proc_dir_entry *proc_root_kcore;
+ static void create_seq_entry(char *name, mode_t mode, struct file_operations *f)
+ {
+ struct proc_dir_entry *entry;
+ entry = create_proc_entry(name, mode, NULL);
+ if (entry)
+ entry->proc_fops = f;
+ }
+
void __init proc_misc_init(void)
{
***************
*** 596,612 ****
{"meminfo", meminfo_read_proc},
{"version", version_read_proc},
- {"cpuinfo", cpuinfo_read_proc},
- #ifdef CONFIG_PROC_HARDWARE
- {"hardware", hardware_read_proc},
- #endif
- #ifdef CONFIG_STRAM_PROC
- {"stram", stram_read_proc},
- #endif
- #ifdef CONFIG_DEBUG_MALLOC
- {"malloc", malloc_read_proc},
- #endif
#ifdef CONFIG_MODULES
{"modules", modules_read_proc},
- {"ksyms", ksyms_read_proc},
#endif
{"stat", kstat_read_proc},
--- 587,592 ----
***************
*** 627,631 ****
#endif
{"locks", locks_read_proc},
- {"mounts", mounts_read_proc},
{"swaps", swaps_read_proc},
{"iomem", memory_read_proc},
--- 607,610 ----
***************
*** 640,643 ****
--- 619,627 ----
if (entry)
entry->proc_fops = &proc_kmsg_operations;
+ create_seq_entry("mounts", 0, &proc_mounts_operations);
+ create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
+ #ifdef CONFIG_MODULES
+ create_seq_entry("ksyms", 0, &proc_ksyms_operations);
+ #endif
proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
if (proc_root_kcore) {
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-12 20:45:49
|
Update of /cvsroot/linuxcompressed/linux/kernel
In directory usw-pr-cvs1:/tmp/cvs-serv17791/kernel
Modified Files:
sysctl.c
Log Message:
- 0.20pre2 version updated from 2.4.10 to 2.4.16.
- Code was rewritten in swapfile.c to work with the new swap file functions
(swap_free, swap_duplicate, swap_info_get, swap_info_put, etc).
Index: sysctl.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/kernel/sysctl.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -C2 -r1.1 -r1.2
*** sysctl.c 2001/10/01 22:43:59 1.1
--- sysctl.c 2001/12/12 20:45:46 1.2
***************
*** 178,181 ****
--- 178,183 ----
{KERN_CORE_USES_PID, "core_uses_pid", &core_uses_pid, sizeof(int),
0644, NULL, &proc_dointvec},
+ {KERN_TAINTED, "tainted", &tainted, sizeof(int),
+ 0644, NULL, &proc_dointvec},
{KERN_CAP_BSET, "cap-bound", &cap_bset, sizeof(kernel_cap_t),
0600, NULL, &proc_dointvec_bset},
***************
*** 276,279 ****
--- 278,285 ----
{VM_PAGE_CLUSTER, "page-cluster",
&page_cluster, sizeof(int), 0644, NULL, &proc_dointvec},
+ {VM_MIN_READAHEAD, "min-readahead",
+ &vm_min_readahead,sizeof(int), 0644, NULL, &proc_dointvec},
+ {VM_MAX_READAHEAD, "max-readahead",
+ &vm_max_readahead,sizeof(int), 0644, NULL, &proc_dointvec},
#ifdef CONFIG_COMP_CACHE
{VM_CTL_COMP_CACHE, "comp_cache", NULL, 0, 0555, comp_cache_table},
***************
*** 309,314 ****
{FS_NRDQUOT, "dquot-nr", &nr_dquots, 2*sizeof(int),
0444, NULL, &proc_dointvec},
- {FS_MAXDQUOT, "dquot-max", &max_dquots, sizeof(int),
- 0644, NULL, &proc_dointvec},
{FS_DENTRY, "dentry-state", &dentry_stat, 6*sizeof(int),
0444, NULL, &proc_dointvec},
--- 315,318 ----
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-12 20:45:49
|
Update of /cvsroot/linuxcompressed/linux/fs
In directory usw-pr-cvs1:/tmp/cvs-serv17791/fs
Modified Files:
buffer.c
Log Message:
- 0.20pre2 version updated from 2.4.10 to 2.4.16.
- Code was rewritten in swapfile.c to work with the new swap file functions
(swap_free, swap_duplicate, swap_info_get, swap_info_put, etc).
Index: buffer.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/fs/buffer.c,v
retrieving revision 1.8
retrieving revision 1.9
diff -C2 -r1.8 -r1.9
*** buffer.c 2001/09/29 20:30:14 1.8
--- buffer.c 2001/12/12 20:45:46 1.9
***************
*** 46,49 ****
--- 46,50 ----
#include <linux/iobuf.h>
#include <linux/highmem.h>
+ #include <linux/module.h>
#include <linux/completion.h>
***************
*** 53,70 ****
#include <asm/mmu_context.h>
[...1362 lines suppressed...]
*** 2629,2632 ****
--- 2557,2561 ----
return 0;
}
+ EXPORT_SYMBOL(try_to_free_buffers);
/* ================== Debugging =================== */
***************
*** 2725,2734 ****
for(i = 0; i < nr_hash; i++)
hash_table[i] = NULL;
-
- /* Setup free lists. */
- for(i = 0; i < NR_SIZES; i++) {
- free_list[i].list = NULL;
- free_list[i].lock = SPIN_LOCK_UNLOCKED;
- }
/* Setup lru lists. */
--- 2654,2657 ----
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-12-12 20:45:48
|
Update of /cvsroot/linuxcompressed/linux/arch/i386
In directory usw-pr-cvs1:/tmp/cvs-serv17791/arch/i386
Modified Files:
config.in
Log Message:
- 0.20pre2 version updated from 2.4.10 to 2.4.16.
- Code was rewritten in swapfile.c to work with the new swap file functions
(swap_free, swap_duplicate, swap_info_get, swap_info_put, etc).
Index: config.in
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/arch/i386/config.in,v
retrieving revision 1.8
retrieving revision 1.9
diff -C2 -r1.8 -r1.9
*** config.in 2001/10/01 22:43:59 1.8
--- config.in 2001/12/12 20:45:45 1.9
***************
*** 150,153 ****
--- 150,154 ----
fi
tristate 'Toshiba Laptop support' CONFIG_TOSHIBA
+ tristate 'Dell Inspiron 8000 support' CONFIG_I8K
tristate '/dev/cpu/microcode - Intel IA32 CPU microcode support' CONFIG_MICROCODE
***************
*** 171,179 ****
bool 'Symmetric multi-processing support' CONFIG_SMP
if [ "$CONFIG_SMP" != "y" ]; then
! bool 'APIC and IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC
if [ "$CONFIG_X86_UP_IOAPIC" = "y" ]; then
define_bool CONFIG_X86_IO_APIC y
- define_bool CONFIG_X86_LOCAL_APIC y
fi
fi
--- 172,185 ----
bool 'Symmetric multi-processing support' CONFIG_SMP
if [ "$CONFIG_SMP" != "y" ]; then
! bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
! dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
! if [ "$CONFIG_X86_UP_APIC" = "y" ]; then
! define_bool CONFIG_X86_LOCAL_APIC y
! fi
if [ "$CONFIG_X86_UP_IOAPIC" = "y" ]; then
define_bool CONFIG_X86_IO_APIC y
fi
+ else
+ bool 'Multiquad NUMA system' CONFIG_MULTIQUAD
fi
***************
*** 191,194 ****
--- 197,201 ----
int 'Compressed Cache Size (Memory Pages)' CONFIG_COMP_CACHE_SIZE 512
fi
+
bool 'Networking support' CONFIG_NET
***************
*** 234,239 ****
--- 241,248 ----
if [ "$CONFIG_HOTPLUG" = "y" ] ; then
source drivers/pcmcia/Config.in
+ source drivers/hotplug/Config.in
else
define_bool CONFIG_PCMCIA n
+ define_bool CONFIG_HOTPLUG_PCI n
fi
***************
*** 316,320 ****
source drivers/ieee1394/Config.in
! source drivers/i2o/Config.in
if [ "$CONFIG_NET" = "y" ]; then
--- 325,329 ----
source drivers/ieee1394/Config.in
! source drivers/message/i2o/Config.in
if [ "$CONFIG_NET" = "y" ]; then
***************
*** 399,402 ****
--- 408,412 ----
bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
+ bool ' Debug high memory support' CONFIG_DEBUG_HIGHMEM
bool ' Debug memory allocations' CONFIG_DEBUG_SLAB
bool ' Memory mapped I/O debugging' CONFIG_DEBUG_IOVIRT
|
|
From: Rodrigo S. de C. <rc...@us...> - 2001-10-08 14:56:38
|
Update of /cvsroot/linuxcompressed/linux/include/linux In directory usw-pr-cvs1:/tmp/cvs-serv2127/include/linux Modified Files: comp_cache.h swap.h Log Message: - In 2.4.10 the MAX_SWAPFILES was changed, what caused some problems for our code in architectures like UML, which handles swap entry addresses differently than in i386. That way, we cannot keep using MAX_SWAPFILES for our swap type unless we decrease this variable. In this case, we decreased the variable to 31 and so we can use 31 as COMP_CACHE_SWP_TYPE without worrying about arch-dependent configuration. Nevertheless, we are limited to 31 swap files, not 32 as in vanilla. Index: comp_cache.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/comp_cache.h,v retrieving revision 1.28 retrieving revision 1.29 diff -C2 -r1.28 -r1.29 *** comp_cache.h 2001/10/01 22:43:59 1.28 --- comp_cache.h 2001/10/08 14:56:35 1.29 *************** *** 2,6 **** * linux/mm/comp_cache.h * ! * Time-stamp: <2001-10-01 18:25:53 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache.h * ! * Time-stamp: <2001-10-06 17:17:13 rcastro> * * Linux Virtual Memory Compressed Cache Index: swap.h =================================================================== RCS file: /cvsroot/linuxcompressed/linux/include/linux/swap.h,v retrieving revision 1.10 retrieving revision 1.11 diff -C2 -r1.10 -r1.11 *** swap.h 2001/09/29 20:30:14 1.10 --- swap.h 2001/10/08 14:56:35 1.11 *************** *** 9,13 **** --- 9,23 ---- #define SWAP_FLAG_PRIO_SHIFT 0 + #ifdef CONFIG_COMP_CACHE + + /* some architectures may deal with SWP_ENTRY differently, such as + * UML. Use MAX_SWAPFILES for COMP_CACHE_SWP_TYPE may cause problems, + * so let's decrease the maximum number in order to use safely the + * last swap file type (in this case 31) */ + #define MAX_SWAPFILES 31 + + #else #define MAX_SWAPFILES 32 + #endif /* |
|
From: Rodrigo S. de C. <rc...@us...> - 2001-10-08 14:56:38
|
Update of /cvsroot/linuxcompressed/linux/mm/comp_cache In directory usw-pr-cvs1:/tmp/cvs-serv2127/mm/comp_cache Modified Files: free.c vswap.c Log Message: - In 2.4.10 the MAX_SWAPFILES was changed, what caused some problems for our code in architectures like UML, which handles swap entry addresses differently than in i386. That way, we cannot keep using MAX_SWAPFILES for our swap type unless we decrease this variable. In this case, we decreased the variable to 31 and so we can use 31 as COMP_CACHE_SWP_TYPE without worrying about arch-dependent configuration. Nevertheless, we are limited to 31 swap files, not 32 as in vanilla. Index: free.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/free.c,v retrieving revision 1.6 retrieving revision 1.7 diff -C2 -r1.6 -r1.7 *** free.c 2001/09/29 20:30:14 1.6 --- free.c 2001/10/08 14:56:35 1.7 *************** *** 2,6 **** * linux/mm/comp_cache/free.c * ! * Time-stamp: <2001-09-29 16:09:04 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/free.c * ! * Time-stamp: <2001-10-06 17:07:23 rcastro> * * Linux Virtual Memory Compressed Cache Index: vswap.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/mm/comp_cache/vswap.c,v retrieving revision 1.7 retrieving revision 1.8 diff -C2 -r1.7 -r1.8 *** vswap.c 2001/10/01 22:43:59 1.7 --- vswap.c 2001/10/08 14:56:35 1.8 *************** *** 2,6 **** * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2001-10-01 18:27:32 rcastro> * * Linux Virtual Memory Compressed Cache --- 2,6 ---- * linux/mm/comp_cache/vswap.c * ! * Time-stamp: <2001-10-08 11:31:54 rcastro> * * Linux Virtual Memory Compressed Cache *************** *** 385,389 **** tmp_pte_list->next = vswap_address[offset]->pte_list; tmp_pte_list->ptep = ptep; ! 
vswap_address[offset]->pte_list = tmp_pte_list; } --- 385,389 ---- tmp_pte_list->next = vswap_address[offset]->pte_list; tmp_pte_list->ptep = ptep; ! vswap_address[offset]->pte_list = tmp_pte_list; } |