[lc-checkins] CVS: linux/mm filemap.c,1.14,1.15 memory.c,1.22,1.23 mmap.c,1.6,1.7 page_alloc.c,1.15,
From: Rodrigo S. de C. <rc...@us...> - 2002-02-26 20:59:06
Update of /cvsroot/linuxcompressed/linux/mm
In directory usw-pr-cvs1:/tmp/cvs-serv20377/mm
Modified Files:
filemap.c memory.c mmap.c page_alloc.c shmem.c swapfile.c
vmscan.c
Log Message:
- Update to 2.4.18
- Some minor changes to make the code compile when CONFIG_COMP_CACHE is
disabled
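
The hunks below replace open-coded !list_empty(&mapping->dirty_comp_pages)
tests with a there_are_dirty_comp_pages(mapping) helper, so the check can
compile away when CONFIG_COMP_CACHE is off. A plausible sketch of the
helper (hypothetical -- the real definition lives in the compressed-cache
headers, which this diff does not show):

        #ifdef CONFIG_COMP_CACHE
        static inline int there_are_dirty_comp_pages(struct address_space *mapping)
        {
                return !list_empty(&mapping->dirty_comp_pages);
        }
        #else
        /* Without CONFIG_COMP_CACHE the dirty_comp_pages list does not
         * exist, so the helper must not touch it; a constant zero lets
         * the compiler drop the whole retry branch. */
        static inline int there_are_dirty_comp_pages(struct address_space *mapping)
        {
                return 0;
        }
        #endif
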
Index: filemap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/filemap.c,v
retrieving revision 1.14
retrieving revision 1.15
diff -C2 -r1.14 -r1.15
*** filemap.c 26 Feb 2002 16:01:10 -0000 1.14
--- filemap.c 26 Feb 2002 20:59:01 -0000 1.15
***************
*** 471,475 ****
spin_unlock(&pagecache_lock);
! if (!list_empty(&mapping->dirty_comp_pages))
goto try_again;
--- 471,475 ----
spin_unlock(&pagecache_lock);
! if (there_are_dirty_comp_pages(mapping))
goto try_again;
***************
*** 496,534 ****
}
- /*
- * By the time this is called, the page is locked and
- * we don't have to worry about any races any more.
- *
- * Start the IO..
- */
- static int writeout_one_page(struct page *page)
- {
- struct buffer_head *bh, *head = page->buffers;
-
- bh = head;
- do {
- if (buffer_locked(bh) || !buffer_dirty(bh) || !buffer_uptodate(bh))
- continue;
-
- bh->b_flushtime = jiffies;
- ll_rw_block(WRITE, 1, &bh);
- } while ((bh = bh->b_this_page) != head);
- return 0;
- }
-
- int waitfor_one_page(struct page *page)
- {
- int error = 0;
- struct buffer_head *bh, *head = page->buffers;
-
- bh = head;
- do {
- wait_on_buffer(bh);
- if (buffer_req(bh) && !buffer_uptodate(bh))
- error = -EIO;
- } while ((bh = bh->b_this_page) != head);
- return error;
- }
-
static int do_buffer_fdatasync(struct list_head *head, unsigned long start, unsigned long end, int (*fn)(struct page *))
{
--- 496,499 ----
***************
*** 624,629 ****
*
*/
! void filemap_fdatasync(struct address_space * mapping)
{
int (*writepage)(struct page *) = mapping->a_ops->writepage;
--- 589,595 ----
*
*/
! int filemap_fdatasync(struct address_space * mapping)
{
+ int ret = 0;
int (*writepage)(struct page *) = mapping->a_ops->writepage;
***************
*** 648,653 ****
if (PageDirty(page)) {
ClearPageDirty(page);
! writepage(page);
} else
UnlockPage(page);
--- 614,622 ----
if (PageDirty(page)) {
+ int err;
ClearPageDirty(page);
! err = writepage(page);
! if (err && !ret)
! ret = err;
} else
UnlockPage(page);
***************
*** 661,666 ****
* might get compressed. It may also happen to compress dirty
* pages when allocating new pages in the first loop. */
! if (!list_empty(&mapping->dirty_comp_pages))
goto try_again;
}
--- 630,636 ----
* might get compressed. It may also happen to compress dirty
* pages when allocating new pages in the first loop. */
! if (there_are_dirty_comp_pages(mapping))
goto try_again;
+ return ret;
}
***************
*** 672,677 ****
*
*/
! void filemap_fdatawait(struct address_space * mapping)
{
spin_lock(&pagecache_lock);
--- 642,649 ----
*
*/
! int filemap_fdatawait(struct address_space * mapping)
{
+ int ret = 0;
+
spin_lock(&pagecache_lock);
***************
*** 689,692 ****
--- 661,666 ----
___wait_on_page(page);
+ if (PageError(page))
+ ret = -EIO;
page_cache_release(page);
***************
*** 694,697 ****
--- 668,672 ----
}
spin_unlock(&pagecache_lock);
+ return ret;
}
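
filemap_fdatasync() and filemap_fdatawait() now return int, using a
"record the first error, keep going" pattern so one failed page does not
stop writeout of the rest. The idiom in isolation (struct item and
flush_one() are made-up names, not kernel code):

        struct item { int fd; };                /* made-up item type */

        static int flush_one(struct item *it)   /* hypothetical per-item op */
        {
                return it->fd < 0 ? -5 /* -EIO */ : 0;
        }

        /* Process every item, but report only the earliest failure. */
        static int flush_all(struct item *v, int n)
        {
                int ret = 0, i;

                for (i = 0; i < n; i++) {
                        int err = flush_one(&v[i]);
                        if (err && !ret)
                                ret = err;      /* keep the first error */
                }
                return ret;
        }
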
***************
*** 1064,1068 ****
if (!page) {
struct page *newpage = alloc_page(gfp_mask);
- page = NULL;
if (newpage) {
spin_lock(&pagecache_lock);
--- 1039,1042 ----
***************
*** 1651,1660 ****
/*
! * Flush to disk exlusively the _data_, metadata must remains
* completly asynchronous or performance will go to /dev/null.
*/
! filemap_fdatasync(mapping);
! retval = fsync_inode_data_buffers(inode);
! filemap_fdatawait(mapping);
if (retval < 0)
goto out_free;
--- 1625,1636 ----
/*
! * Flush to disk exclusively the _data_, metadata must remain
completely asynchronous or performance will go to /dev/null.
*/
! retval = filemap_fdatasync(mapping);
! if (retval == 0)
! retval = fsync_inode_data_buffers(inode);
! if (retval == 0)
! retval = filemap_fdatawait(mapping);
if (retval < 0)
goto out_free;
***************
*** 2273,2296 ****
*/
static int msync_interval(struct vm_area_struct * vma,
unsigned long start, unsigned long end, int flags)
{
struct file * file = vma->vm_file;
if (file && (vma->vm_flags & VM_SHARED)) {
! int error;
! error = filemap_sync(vma, start, end-start, flags);
! if (!error && (flags & MS_SYNC)) {
struct inode * inode = file->f_dentry->d_inode;
down(&inode->i_sem);
! filemap_fdatasync(inode->i_mapping);
! if (file->f_op && file->f_op->fsync)
! error = file->f_op->fsync(file, file->f_dentry, 1);
! filemap_fdatawait(inode->i_mapping);
up(&inode->i_sem);
}
- return error;
}
! return 0;
}
--- 2249,2291 ----
*/
+ /*
+ * MS_SYNC syncs the entire file - including mappings.
+ *
+ * MS_ASYNC initiates writeout of just the dirty mapped data.
+ * This provides no guarantee of file integrity - things like indirect
+ * blocks may not have started writeout. MS_ASYNC is primarily useful
+ * where the application knows that it has finished with the data and
+ * wishes to intelligently schedule its own I/O traffic.
+ */
static int msync_interval(struct vm_area_struct * vma,
unsigned long start, unsigned long end, int flags)
{
+ int ret = 0;
struct file * file = vma->vm_file;
+
if (file && (vma->vm_flags & VM_SHARED)) {
! ret = filemap_sync(vma, start, end-start, flags);
! if (!ret && (flags & (MS_SYNC|MS_ASYNC))) {
struct inode * inode = file->f_dentry->d_inode;
+
down(&inode->i_sem);
! ret = filemap_fdatasync(inode->i_mapping);
! if (flags & MS_SYNC) {
! int err;
!
! if (file->f_op && file->f_op->fsync) {
! err = file->f_op->fsync(file, file->f_dentry, 1);
! if (err && !ret)
! ret = err;
! }
! err = filemap_fdatawait(inode->i_mapping);
! if (err && !ret)
! ret = err;
! }
up(&inode->i_sem);
}
}
! return ret;
}
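
The new comment spells out the MS_SYNC/MS_ASYNC semantics; from user
space the distinction looks like this (minimal sketch, error handling
trimmed and a pre-existing "data.bin" of at least one page assumed):

        #include <fcntl.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("data.bin", O_RDWR);
                char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                               MAP_SHARED, fd, 0);

                memcpy(p, "hello", 5);

                /* MS_ASYNC: start writeout of the dirty mapped pages and
                 * return; no completion or metadata guarantees. */
                msync(p, 4096, MS_ASYNC);

                /* MS_SYNC: also wait, and (via ->fsync) cover the file
                 * itself, so the data is really on disk on return. */
                msync(p, 4096, MS_SYNC);

                munmap(p, 4096);
                close(fd);
                return 0;
        }
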
***************
*** 3150,3154 ****
status = mapping->a_ops->prepare_write(file, page, offset, offset+bytes);
if (status)
! goto unlock;
page_fault = __copy_from_user(kaddr+offset, buf, bytes);
flush_dcache_page(page);
--- 3145,3149 ----
status = mapping->a_ops->prepare_write(file, page, offset, offset+bytes);
if (status)
! goto sync_failure;
page_fault = __copy_from_user(kaddr+offset, buf, bytes);
flush_dcache_page(page);
***************
*** 3175,3178 ****
--- 3170,3174 ----
break;
} while (count);
+ done:
*ppos = pos;
***************
*** 3196,3199 ****
--- 3192,3207 ----
status = -EFAULT;
goto unlock;
+
+ sync_failure:
+ /*
+ * If blocksize < pagesize, prepare_write() may have instantiated a
+ * few blocks outside i_size. Trim these off again.
+ */
+ kunmap(page);
+ UnlockPage(page);
+ page_cache_release(page);
+ if (pos + bytes > inode->i_size)
+ vmtruncate(inode, inode->i_size);
+ goto done;
o_direct:
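
Worked numbers for the sync_failure comment above (hypothetical sizes,
illustration only): with 1024-byte blocks and 4096-byte pages, a failed
prepare_write() for a write extending the file can leave blocks
instantiated beyond i_size:

        #include <stdio.h>

        int main(void)
        {
                long blocksize = 1024, i_size = 5000;
                long pos = 6000, bytes = 100;           /* the failed write */
                long last = (pos + bytes - 1) / blocksize;
                long eof_block = (i_size - 1) / blocksize;

                if (last > eof_block)
                        printf("blocks %ld..%ld lie past i_size; vmtruncate trims them\n",
                               eof_block + 1, last);
                return 0;
        }
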
Index: memory.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/memory.c,v
retrieving revision 1.22
retrieving revision 1.23
diff -C2 -r1.22 -r1.23
*** memory.c 25 Feb 2002 19:34:41 -0000 1.22
--- memory.c 26 Feb 2002 20:59:01 -0000 1.23
***************
*** 180,184 ****
unsigned long address = vma->vm_start;
unsigned long end = vma->vm_end;
! unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE;
src_pgd = pgd_offset(src, address)-1;
--- 180,184 ----
unsigned long address = vma->vm_start;
unsigned long end = vma->vm_end;
! unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
src_pgd = pgd_offset(src, address)-1;
***************
*** 249,253 ****
/* If it's a COW mapping, write protect it both in the parent and the child */
! if (cow) {
ptep_set_wrprotect(src_pte);
pte = *src_pte;
--- 249,253 ----
/* If it's a COW mapping, write protect it both in the parent and the child */
! if (cow && pte_write(pte)) {
ptep_set_wrprotect(src_pte);
pte = *src_pte;
***************
*** 447,454 ****
}
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
int len, int write, int force, struct page **pages, struct vm_area_struct **vmas)
{
! int i = 0;
do {
--- 447,470 ----
}
+ /*
+ * Please read Documentation/cachetlb.txt before using this function,
+ * accessing foreign memory spaces can cause cache coherency problems.
+ *
+ * Accessing a VM_IO area is even more dangerous, therefore the function
+ * fails if pages is != NULL and a VM_IO area is found.
+ */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
int len, int write, int force, struct page **pages, struct vm_area_struct **vmas)
{
! int i;
! unsigned int flags;
!
! /*
! * Require read or write permissions.
! * If 'force' is set, we only require the "MAY" flags.
! */
! flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
! flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
! i = 0;
do {
***************
*** 457,467 ****
vma = find_extend_vma(mm, start);
! if ( !vma ||
! (!force &&
! ((write && (!(vma->vm_flags & VM_WRITE))) ||
! (!write && (!(vma->vm_flags & VM_READ))) ) )) {
! if (i) return i;
! return -EFAULT;
! }
spin_lock(&mm->page_table_lock);
--- 473,478 ----
vma = find_extend_vma(mm, start);
! if ( !vma || (pages && vma->vm_flags & VM_IO) || !(flags & vma->vm_flags) )
! return i ? : -EFAULT;
spin_lock(&mm->page_table_lock);
***************
*** 491,496 ****
* depending on the type of the found page
*/
! if (pages[i])
! page_cache_get(pages[i]);
}
if (vmas)
--- 502,508 ----
* depending on the type of the found page
*/
! if (!pages[i])
! goto bad_page;
! page_cache_get(pages[i]);
}
if (vmas)
***************
*** 502,506 ****
--- 514,530 ----
spin_unlock(&mm->page_table_lock);
} while(len);
+ out:
return i;
+
+ /*
+ * We found an invalid page in the VMA. Release all we have
+ * so far and fail.
+ */
+ bad_page:
+ spin_unlock(&mm->page_table_lock);
+ while (i--)
+ page_cache_release(pages[i]);
+ i = -EFAULT;
+ goto out;
}
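
The rewritten permission check folds the old four-way test into two mask
operations against vma->vm_flags. The logic in isolation (the VM_* values
below match my reading of the 2.4 headers but are illustrative here):

        #include <stdio.h>

        #define VM_READ     0x01
        #define VM_WRITE    0x02
        #define VM_MAYREAD  0x10
        #define VM_MAYWRITE 0x20

        static unsigned int gup_flags(int write, int force)
        {
                unsigned int flags;

                flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
                flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
                return flags;
        }

        int main(void)
        {
                /* write,force -> required bit in vma->vm_flags:
                 *   1,0 -> VM_WRITE     1,1 -> VM_MAYWRITE
                 *   0,0 -> VM_READ      0,1 -> VM_MAYREAD  */
                printf("%#x %#x %#x %#x\n", gup_flags(1, 0), gup_flags(1, 1),
                       gup_flags(0, 0), gup_flags(0, 1));
                return 0;
        }

A vma passes when (flags & vma->vm_flags) is non-zero, which is exactly
the !(flags & vma->vm_flags) failure test in the hunk.
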
Index: mmap.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/mmap.c,v
retrieving revision 1.6
retrieving revision 1.7
diff -C2 -r1.6 -r1.7
*** mmap.c 14 Jan 2002 12:05:08 -0000 1.6
--- mmap.c 26 Feb 2002 20:59:01 -0000 1.7
***************
*** 625,629 ****
if (flags & MAP_FIXED) {
if (addr > TASK_SIZE - len)
! return -EINVAL;
if (addr & ~PAGE_MASK)
return -EINVAL;
--- 625,629 ----
if (flags & MAP_FIXED) {
if (addr > TASK_SIZE - len)
! return -ENOMEM;
if (addr & ~PAGE_MASK)
return -EINVAL;
Index: page_alloc.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/page_alloc.c,v
retrieving revision 1.15
retrieving revision 1.16
diff -C2 -r1.15 -r1.16
*** page_alloc.c 23 Feb 2002 18:24:11 -0000 1.15
--- page_alloc.c 26 Feb 2002 20:59:01 -0000 1.16
***************
*** 71,74 ****
--- 71,80 ----
zone_t *zone;
+ /* Yes, think what happens when other parts of the kernel take
+ * a reference to a page in order to pin it for io. -ben
+ */
+ if (PageLRU(page))
+ lru_cache_del(page);
+
if (page->buffers)
BUG();
***************
*** 425,437 ****
}
return 0;
- }
-
- void page_cache_release(struct page *page)
- {
- if (!PageReserved(page) && put_page_testzero(page)) {
- if (PageLRU(page))
- lru_cache_del(page);
- __free_pages_ok(page, 0);
- }
}
--- 431,434 ----
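
With __free_pages_ok() now pulling LRU pages off the list itself,
page_cache_release() no longer needs its own LRU handling and can
collapse into a plain wrapper -- presumably matching what stock 2.4.18
has in include/linux/pagemap.h:

        #define page_cache_get(page)            get_page(page)
        #define page_cache_release(page)        __free_page(page)
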
Index: shmem.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/shmem.c,v
retrieving revision 1.16
retrieving revision 1.17
diff -C2 -r1.16 -r1.17
*** shmem.c 25 Feb 2002 19:34:41 -0000 1.16
--- shmem.c 26 Feb 2002 20:59:01 -0000 1.17
***************
*** 754,757 ****
--- 754,762 ----
int err;
+ if ((ssize_t) count < 0)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
down(&inode->i_sem);
Index: swapfile.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/swapfile.c,v
retrieving revision 1.26
retrieving revision 1.27
diff -C2 -r1.26 -r1.27
*** swapfile.c 23 Feb 2002 18:24:11 -0000 1.26
--- swapfile.c 26 Feb 2002 20:59:01 -0000 1.27
***************
*** 383,387 ****
page_cache_get(page);
/* Only cache user (+us), or swap space full? Free it! */
! if (page_count(page) == 2 || vm_swap_full()) {
delete_from_swap_cache(page);
SetPageDirty(page);
--- 383,387 ----
page_cache_get(page);
/* Only cache user (+us), or swap space full? Free it! */
! if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
delete_from_swap_cache(page);
SetPageDirty(page);
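
The extra "- !!page->buffers" discounts the reference that attached
buffers hold, so "only the swap cache and us" works out to the same
"== 2" test for buffered and bufferless pages. The counting, spelled out
(toy code, not kernel code):

        #include <stdio.h>

        int main(void)
        {
                void *buffers = NULL;
                int count = 2;          /* swap cache + our page_cache_get() */

                /* !!p is 1 for any non-NULL pointer, 0 for NULL. */
                printf("%d\n", count - !!buffers == 2);  /* 1: free it */

                buffers = &count;       /* pretend buffers are attached */
                count = 3;              /* swap cache + us + buffers */
                printf("%d\n", count - !!buffers == 2);  /* still 1 */
                return 0;
        }
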
Index: vmscan.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/mm/vmscan.c,v
retrieving revision 1.28
retrieving revision 1.29
diff -C2 -r1.28 -r1.29
*** vmscan.c 25 Feb 2002 19:34:41 -0000 1.28
--- vmscan.c 26 Feb 2002 20:59:01 -0000 1.29
***************
*** 509,526 ****
/* compress it if it's a clean page that has not been
! * compressed in a previous iteration and it has once
! * been mapped (and so decompressed if we are using
! * compressed swap) */
! if (!PageCompCache(page)) {
! page_cache_get(page);
! spin_unlock(&pagecache_lock);
!
! compress_page(page, 0, gfp_mask);
! page_cache_release(page);
!
! spin_lock(&pagemap_lru_lock);
continue;
! }
!
/* point of no return */
if (likely(!PageSwapCache(page))) {
--- 509,516 ----
/* compress it if it's a clean page that has not been
! * compressed in a previous iteration */
! if (compress_clean_page(page, gfp_mask))
continue;
!
/* point of no return */
if (likely(!PageSwapCache(page))) {
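
The open-coded sequence deleted above is presumably what moved into
compress_clean_page(); reconstructed from those lines, the helper would
look roughly like this (a guess at its shape -- only the name is
confirmed by this diff, and the lock juggling mirrors what the deleted
code did under the caller's locks):

        static int compress_clean_page(struct page *page,
                                       unsigned int gfp_mask)
        {
                if (PageCompCache(page))
                        return 0;       /* compressed in an earlier pass */

                page_cache_get(page);
                spin_unlock(&pagecache_lock);

                compress_page(page, 0, gfp_mask);
                page_cache_release(page);

                spin_lock(&pagemap_lru_lock);
                return 1;               /* caller continues its LRU scan */
        }
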
***************
*** 563,567 ****
spin_lock(&pagemap_lru_lock);
entry = active_list.prev;
! while (nr_pages-- && entry != &active_list) {
struct page * page;
--- 553,557 ----
spin_lock(&pagemap_lru_lock);
entry = active_list.prev;
! while (nr_pages && entry != &active_list) {
struct page * page;
***************
*** 573,576 ****
--- 563,568 ----
continue;
}
+
+ nr_pages--;
del_page_from_active_list(page);
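
Net effect of this last hunk: pages skipped by the continue above no
longer consume the nr_pages budget, and only pages actually moved to the
inactive list count. The accounting fix in miniature (toy stand-in for
the refill_inactive() scan, names made up):

        #include <stdio.h>

        int main(void)
        {
                int pages[] = { 1, 0, 1, 1, 0, 1 };  /* 0 = skip this one */
                int i = 0, nr_pages = 3, moved = 0;

                while (nr_pages && i < 6) {
                        if (!pages[i++])
                                continue;   /* old code charged the budget here too */
                        nr_pages--;         /* new code: charge only real work */
                        moved++;
                }
                printf("deactivated %d pages\n", moved);  /* 3, despite skips */
                return 0;
        }
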
|