[lc-checkins] CVS: linux/include/linux fs.h,1.4,1.5 mm.h,1.18,1.19 swap.h,1.19,1.20 sysctl.h,1.6,1.7
From: Rodrigo S. de C. <rc...@us...> - 2003-05-19 01:39:20
Update of /cvsroot/linuxcompressed/linux/include/linux
In directory sc8-pr-cvs1:/tmp/cvs-serv25395/include/linux

Modified Files:
	fs.h mm.h swap.h sysctl.h 
Log Message:
o Port code to 2.4.20

Bug fix (?)
o Changes checks in vswap.c to avoid oopses. It will BUG() instead. Some of
  the checks were done after the value had been accessed.

Note
o Virtual swap addresses are temporarily disabled, due to debugging sessions
  related to the use of swap files instead of swap partitions.

Index: fs.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/fs.h,v
retrieving revision 1.4
retrieving revision 1.5
diff -C2 -r1.4 -r1.5
*** fs.h	27 Feb 2002 19:58:51 -0000	1.4
--- fs.h	19 May 2003 01:38:46 -0000	1.5
***************
*** 207,210 ****
--- 207,211 ----
  extern void inode_init(unsigned long);
  extern void mnt_init(unsigned long);
+ extern void files_init(unsigned long mempages);
  
  /* bh state bits */
***************
*** 218,222 ****
  	BH_Async,	/* 1 if the buffer is under end_buffer_io_async I/O */
  	BH_Wait_IO,	/* 1 if we should write out this buffer */
! 	BH_launder,	/* 1 if we should throttle on this buffer */
  	BH_JBD,		/* 1 if it has an attached journal_head */
  
--- 219,223 ----
  	BH_Async,	/* 1 if the buffer is under end_buffer_io_async I/O */
  	BH_Wait_IO,	/* 1 if we should write out this buffer */
! 	BH_Launder,	/* 1 if we can throttle on this buffer */
  	BH_JBD,		/* 1 if it has an attached journal_head */
  
***************
*** 226,229 ****
--- 227,232 ----
  };
  
+ #define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+ 
  /*
   * Try to keep the most commonly used fields in single cache lines (16
***************
*** 280,283 ****
--- 283,287 ----
  #define buffer_new(bh)		__buffer_state(bh,New)
  #define buffer_async(bh)	__buffer_state(bh,Async)
+ #define buffer_launder(bh)	__buffer_state(bh,Launder)
  
  #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
***************
*** 556,559 ****
--- 560,571 ----
  #define MAX_NON_LFS	((1UL<<31) - 1)
  
+ /* Page cache limit. The filesystems should put that into their s_maxbytes
+    limits, otherwise bad things can happen in VM. */
+ #if BITS_PER_LONG==32
+ #define MAX_LFS_FILESIZE	(((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+ #elif BITS_PER_LONG==64
+ #define MAX_LFS_FILESIZE	0x7fffffffffffffff
+ #endif
+ 
  #define FL_POSIX	1
  #define FL_FLOCK	2
***************
*** 590,593 ****
--- 602,606 ----
  
  	struct fasync_struct *	fl_fasync; /* for lease break notifications */
+ 	unsigned long fl_break_time;	/* for nonblocking lease breaks */
  
  	union {
***************
*** 859,862 ****
--- 872,879 ----
  	int (*setattr) (struct dentry *, struct iattr *);
  	int (*getattr) (struct dentry *, struct iattr *);
+ 	int (*setxattr) (struct dentry *, const char *, void *, size_t, int);
+ 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+ 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
+ 	int (*removexattr) (struct dentry *, const char *);
  };
  
***************
*** 1045,1049 ****
  static inline int get_lease(struct inode *inode, unsigned int mode)
  {
! 	if (inode->i_flock && (inode->i_flock->fl_flags & FL_LEASE))
  		return __get_lease(inode, mode);
  	return 0;
--- 1062,1066 ----
  static inline int get_lease(struct inode *inode, unsigned int mode)
  {
! 	if (inode->i_flock)
  		return __get_lease(inode, mode);
  	return 0;
***************
*** 1108,1112 ****
  extern int fs_may_remount_ro(struct super_block *);
  
! extern int try_to_free_buffers(struct page *, unsigned int);
  extern void refile_buffer(struct buffer_head * buf);
  extern void create_empty_buffers(struct page *, kdev_t, unsigned long);
--- 1125,1129 ----
  extern int fs_may_remount_ro(struct super_block *);
  
! extern int FASTCALL(try_to_free_buffers(struct page *, unsigned int));
  extern void refile_buffer(struct buffer_head * buf);
  extern void create_empty_buffers(struct page *, kdev_t, unsigned long);
***************
*** 1159,1165 ****
  extern void FASTCALL(__mark_buffer_dirty(struct buffer_head *bh));
  extern void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
  extern void FASTCALL(buffer_insert_inode_data_queue(struct buffer_head *, struct inode *));
  
! #define atomic_set_buffer_dirty(bh) test_and_set_bit(BH_Dirty, &(bh)->b_state)
  
  static inline void mark_buffer_async(struct buffer_head * bh, int on)
--- 1176,1186 ----
  extern void FASTCALL(__mark_buffer_dirty(struct buffer_head *bh));
  extern void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
+ extern void FASTCALL(buffer_insert_inode_queue(struct buffer_head *, struct inode *));
  extern void FASTCALL(buffer_insert_inode_data_queue(struct buffer_head *, struct inode *));
  
! static inline int atomic_set_buffer_dirty(struct buffer_head *bh)
! {
! 	return test_and_set_bit(BH_Dirty, &bh->b_state);
! }
  
  static inline void mark_buffer_async(struct buffer_head * bh, int on)
***************
*** 1186,1190 ****
  }
  
- extern void buffer_insert_inode_queue(struct buffer_head *, struct inode *);
  static inline void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
  {
--- 1207,1210 ----
***************
*** 1214,1221 ****
  extern int fsync_no_super(kdev_t);
  extern void sync_inodes_sb(struct super_block *);
! extern int osync_inode_buffers(struct inode *);
! extern int osync_inode_data_buffers(struct inode *);
! extern int fsync_inode_buffers(struct inode *);
! extern int fsync_inode_data_buffers(struct inode *);
  extern int inode_has_buffers(struct inode *);
  extern int filemap_fdatasync(struct address_space *);
--- 1234,1246 ----
  extern int fsync_no_super(kdev_t);
  extern void sync_inodes_sb(struct super_block *);
! extern int fsync_buffers_list(struct list_head *);
! static inline int fsync_inode_buffers(struct inode *inode)
! {
! 	return fsync_buffers_list(&inode->i_dirty_buffers);
! }
! static inline int fsync_inode_data_buffers(struct inode *inode)
! {
! 	return fsync_buffers_list(&inode->i_dirty_data_buffers);
! }
  extern int inode_has_buffers(struct inode *);
  extern int filemap_fdatasync(struct address_space *);
***************
*** 1313,1316 ****
--- 1338,1342 ----
  extern int FASTCALL(path_init(const char *, unsigned, struct nameidata *));
  extern int FASTCALL(path_walk(const char *, struct nameidata *));
+ extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *));
  extern int FASTCALL(link_path_walk(const char *, struct nameidata *));
  extern void path_release(struct nameidata *);
***************
*** 1371,1374 ****
--- 1397,1402 ----
  }
  extern int set_blocksize(kdev_t, int);
+ extern int sb_set_blocksize(struct super_block *, int);
+ extern int sb_min_blocksize(struct super_block *, int);
  extern struct buffer_head * bread(kdev_t, int, int);
  static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
***************
*** 1433,1437 ****
--- 1461,1470 ----
  
  extern int vfs_readdir(struct file *, filldir_t, void *);
+ extern int dcache_dir_open(struct inode *, struct file *);
+ extern int dcache_dir_close(struct inode *, struct file *);
+ extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
+ extern int dcache_dir_fsync(struct file *, struct dentry *, int);
  extern int dcache_readdir(struct file *, void *, filldir_t);
+ extern struct file_operations dcache_dir_ops;
  
  extern struct file_system_type *get_fs_type(const char *name);
***************
*** 1454,1462 ****
  
  extern void show_buffers(void);
- extern void mount_root(void);
  
  #ifdef CONFIG_BLK_DEV_INITRD
  extern unsigned int real_root_dev;
- extern int change_root(kdev_t, const char *);
  #endif
  
--- 1487,1493 ----

Index: mm.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/mm.h,v
retrieving revision 1.18
retrieving revision 1.19
diff -C2 -r1.18 -r1.19
*** mm.h	10 Sep 2002 16:43:04 -0000	1.18
--- mm.h	19 May 2003 01:38:47 -0000	1.19
***************
*** 16,19 ****
--- 16,20 ----
  extern unsigned long max_mapnr;
  extern unsigned long num_physpages;
+ extern unsigned long num_mappedpages;
  extern void * high_memory;
  extern int page_cluster;
***************
*** 160,169 ****
  	struct list_head lru;		/* Pageout list, eg. active_list;
  					   protected by pagemap_lru_lock !! */
- 	wait_queue_head_t wait;		/* Page locked?  Stand in line... */
  	struct page **pprev_hash;	/* Complement to *next_hash. */
  	struct buffer_head * buffers;	/* Buffer maps us to a disk block. */
  	void *virtual;			/* Kernel virtual address (NULL if
  					   not kmapped, ie. highmem) */
! 	struct zone_struct *zone;	/* Memory zone we are in. */
  } mem_map_t;
  
--- 161,181 ----
  	struct list_head lru;		/* Pageout list, eg. active_list;
  					   protected by pagemap_lru_lock !! */
  	struct page **pprev_hash;	/* Complement to *next_hash. */
  	struct buffer_head * buffers;	/* Buffer maps us to a disk block. */
+ 
+ 	/*
+ 	 * On machines where all RAM is mapped into kernel address space,
+ 	 * we can simply calculate the virtual address. On machines with
+ 	 * highmem some memory is mapped into kernel virtual memory
+ 	 * dynamically, so we need a place to store that address.
+ 	 * Note that this field could be 16 bits on x86 ... ;)
+ 	 *
+ 	 * Architectures with slow multiplication can define
+ 	 * WANT_PAGE_VIRTUAL in asm/page.h
+ 	 */
+ #if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
  	void *virtual;			/* Kernel virtual address (NULL if
  					   not kmapped, ie. highmem) */
! #endif /* CONFIG_HIGMEM || WANT_PAGE_VIRTUAL */
  } mem_map_t;
  
***************
*** 240,244 ****
   * to swap space and (later) to be read back into memory.
   * During disk I/O, PG_locked is used. This bit is set before I/O
!  * and reset when I/O completes. page->wait is a wait queue of all
   * tasks waiting for the I/O on this page to complete.
   * PG_uptodate tells whether the page's contents is valid.
--- 252,256 ----
   * to swap space and (later) to be read back into memory.
   * During disk I/O, PG_locked is used. This bit is set before I/O
!  * and reset when I/O completes. page_waitqueue(page) is a wait queue of all
   * tasks waiting for the I/O on this page to complete.
   * PG_uptodate tells whether the page's contents is valid.
***************
*** 306,309 ****
--- 318,375 ----
  #define ClearPageLaunder(page)	clear_bit(PG_launder, &(page)->flags)
  
+ /*
+  * The zone field is never updated after free_area_init_core()
+  * sets it, so none of the operations on it need to be atomic.
+  */
+ #define NODE_SHIFT 4
+ #define ZONE_SHIFT (BITS_PER_LONG - 8)
+ 
+ struct zone_struct;
+ extern struct zone_struct *zone_table[];
+ 
+ static inline zone_t *page_zone(struct page *page)
+ {
+ 	return zone_table[page->flags >> ZONE_SHIFT];
+ }
+ 
+ static inline void set_page_zone(struct page *page, unsigned long zone_num)
+ {
+ 	page->flags &= ~(~0UL << ZONE_SHIFT);
+ 	page->flags |= zone_num << ZONE_SHIFT;
+ }
+ 
+ /*
+  * In order to avoid #ifdefs within C code itself, we define
+  * set_page_address to a noop for non-highmem machines, where
+  * the field isn't useful.
+  * The same is true for page_address() in arch-dependent code.
+  */
+ #if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+ 
+ #define set_page_address(page, address)		\
+ 	do {					\
+ 		(page)->virtual = (address);	\
+ 	} while(0)
+ 
+ #else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+ #define set_page_address(page, address)  do { } while(0)
+ #endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+ 
+ /*
+  * Permanent address of a page. Obviously must never be
+  * called on a highmem page.
+  */
+ #if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+ 
+ #define page_address(page) ((page)->virtual)
+ 
+ #else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+ 
+ #define page_address(page)						\
+ 	__va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT)	\
+ 			+ page_zone(page)->zone_start_paddr)
+ 
+ #endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+ 
  extern void FASTCALL(set_page_dirty(struct page *));
  #ifdef CONFIG_COMP_CACHE
***************
*** 627,630 ****
--- 693,698 ----
  
  extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
+ 
+ extern struct page * vmalloc_to_page(void *addr);
  
  #endif /* __KERNEL__ */

Index: swap.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/swap.h,v
retrieving revision 1.19
retrieving revision 1.20
diff -C2 -r1.19 -r1.20
*** swap.h	29 Nov 2002 21:23:02 -0000	1.19
--- swap.h	19 May 2003 01:38:47 -0000	1.20
***************
*** 122,129 ****
  extern int nr_active_pages;
  extern int nr_inactive_pages;
- extern atomic_t nr_async_pages;
  extern atomic_t page_cache_size;
  extern atomic_t buffermem_pages;
! extern spinlock_t pagecache_lock;
  
  extern void __remove_inode_page(struct page *);
--- 122,131 ----
  extern int nr_active_pages;
  extern int nr_inactive_pages;
  extern atomic_t page_cache_size;
  extern atomic_t buffermem_pages;
! 
! extern spinlock_cacheline_t pagecache_lock_cacheline;
! #define pagecache_lock (pagecache_lock_cacheline.lock)
! 
  extern void __remove_inode_page(struct page *);
***************
*** 146,150 ****
  /* linux/mm/vmscan.c */
  extern wait_queue_head_t kswapd_wait;
! extern int FASTCALL(try_to_free_pages(zone_t *, unsigned int, unsigned int));
  
  /* linux/mm/page_io.c */
--- 148,153 ----
  /* linux/mm/vmscan.c */
  extern wait_queue_head_t kswapd_wait;
! extern int FASTCALL(try_to_free_pages_zone(zone_t *, unsigned int));
! extern int FASTCALL(try_to_free_pages(unsigned int));
  
  /* linux/mm/page_io.c */
***************
*** 207,211 ****
  #endif
  
! extern spinlock_t pagemap_lru_lock;
  
  extern void FASTCALL(mark_page_accessed(struct page *));
--- 210,215 ----
  #endif
  
! extern spinlock_cacheline_t pagemap_lru_lock_cacheline;
! #define pagemap_lru_lock pagemap_lru_lock_cacheline.lock
  
  extern void FASTCALL(mark_page_accessed(struct page *));

Index: sysctl.h
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/include/linux/sysctl.h,v
retrieving revision 1.6
retrieving revision 1.7
diff -C2 -r1.6 -r1.7
*** sysctl.h	22 Nov 2002 16:01:34 -0000	1.6
--- sysctl.h	19 May 2003 01:38:47 -0000	1.7
***************
*** 141,147 ****
  	VM_PGT_CACHE=9,		/* struct: Set page table cache parameters */
  	VM_PAGE_CLUSTER=10,	/* int: set number of pages to swap together */
! 	VM_MIN_READAHEAD=12,    /* Min file readahead */
! 	VM_MAX_READAHEAD=13,    /* Max file readahead */
! 	VM_CTL_COMP_CACHE=14
  };
  
--- 141,148 ----
  	VM_PGT_CACHE=9,		/* struct: Set page table cache parameters */
  	VM_PAGE_CLUSTER=10,	/* int: set number of pages to swap together */
! 	VM_MAX_MAP_COUNT=11,	/* int: Maximum number of active map areas */
! 	VM_MIN_READAHEAD=12,    /* Min file readahead */
! 	VM_MAX_READAHEAD=13,    /* Max file readahead */
! 	VM_CTL_COMP_CACHE=14
  };
  
***************
*** 206,210 ****
  	NET_CORE_NO_CONG=14,
  	NET_CORE_LO_CONG=15,
! 	NET_CORE_MOD_CONG=16
  };
  
--- 207,212 ----
  	NET_CORE_NO_CONG=14,
  	NET_CORE_LO_CONG=15,
! 	NET_CORE_MOD_CONG=16,
! 	NET_CORE_DEV_WEIGHT=17
  };
  
***************
*** 291,295 ****
  	NET_IPV4_NONLOCAL_BIND=88,
  	NET_IPV4_ICMP_RATELIMIT=89,
! 	NET_IPV4_ICMP_RATEMASK=90
  };
  
--- 293,298 ----
  	NET_IPV4_NONLOCAL_BIND=88,
  	NET_IPV4_ICMP_RATELIMIT=89,
! 	NET_IPV4_ICMP_RATEMASK=90,
! 	NET_TCP_TW_REUSE=91
  };
  
***************
*** 336,340 ****
  	NET_IPV4_CONF_LOG_MARTIANS=11,
  	NET_IPV4_CONF_TAG=12,
! 	NET_IPV4_CONF_ARPFILTER=13
  };
  
--- 339,344 ----
  	NET_IPV4_CONF_LOG_MARTIANS=11,
  	NET_IPV4_CONF_TAG=12,
! 	NET_IPV4_CONF_ARPFILTER=13,
! 	NET_IPV4_CONF_MEDIUM_ID=14,
  };
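
For context on the vswap.c change described in the log message above (validate first, then BUG() instead of letting a bad virtual swap entry oops the kernel after it has already been dereferenced), here is a minimal sketch of that check-then-access pattern. This is not the project's actual code: the names vswap_slot, vswap_address, vswap_num_entries and vswap_entry() are illustrative assumptions only.

#include <linux/mm.h>	/* struct page; on i386, BUG() is pulled in via asm/page.h */

/* Hypothetical slot descriptor for one virtual swap address
 * (an assumption, not the real vswap.c structure). */
struct vswap_slot {
	struct page *page;	/* compressed page backing this slot, if any */
	int count;		/* reference count on the slot */
};

extern struct vswap_slot *vswap_address;	/* assumed slot table */
extern unsigned long vswap_num_entries;		/* assumed table size */

/*
 * Old pattern: read vswap_address[offset] first and sanity-check it
 * afterwards, so a bad offset could oops before the check ever ran.
 * New pattern: validate everything up front and fail hard with BUG()
 * on any inconsistency.
 */
static inline struct vswap_slot *vswap_entry(unsigned long offset)
{
	if (!vswap_address)
		BUG();
	if (offset >= vswap_num_entries)
		BUG();
	return &vswap_address[offset];
}

Trading an unpredictable oops for an explicit BUG() pins the failure to the exact check that went wrong, which is the kind of determinism that helps in the swap-file debugging sessions mentioned in the note.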