From: NIIBE Y. <gn...@m1...> - 2002-01-24 10:15:06
I'll commit the following changes soon to update to 2.4.17, rotating the
ChangeLog. (I've already tagged 2.4.16.) Then I'll backport the FPU changes
from 2.5.

Paul, I've found the ChangeLog entry, but I could not find the new file.
Could you please add the file to the 2.4 branch too?

Index: Makefile
===================================================================
RCS file: /cvsroot/linuxsh/linux/Makefile,v
retrieving revision 1.1.1.1.2.2
diff -u -3 -p -r1.1.1.1.2.2 Makefile
--- Makefile	2001/11/30 23:03:19	1.1.1.1.2.2
+++ Makefile	2002/01/24 10:09:29
@@ -1,6 +1,6 @@
 VERSION = 2
 PATCHLEVEL = 4
-SUBLEVEL = 16
+SUBLEVEL = 17
 EXTRAVERSION =
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
Index: TODO
===================================================================
RCS file: /cvsroot/linuxsh/linux/TODO,v
retrieving revision 1.1.1.1
diff -u -3 -p -r1.1.1.1 TODO
--- TODO	2001/10/15 20:44:31	1.1.1.1
+++ TODO	2002/01/24 10:09:29
@@ -1,4 +1,5 @@
-* Support zImage with big-endian.
+* include/asm-sh/pgalloc.h
+	Use slab for pgd_alloc
 
 * include/asm-sh/hw_irq.h
 	Kernel profiling
@@ -82,3 +83,5 @@ DONE:
 
 * reboot
 	SR.BL=1 and address error
+
+* Support zImage with big-endian.
Index: Documentation/cachetlb.txt
===================================================================
RCS file: /cvsroot/linuxsh/linux/Documentation/cachetlb.txt,v
retrieving revision 1.1.2.1
diff -u -3 -p -r1.1.2.1 cachetlb.txt
--- Documentation/cachetlb.txt	2001/11/30 01:06:04	1.1.2.1
+++ Documentation/cachetlb.txt	2002/01/24 10:09:33
@@ -275,7 +275,7 @@ Here is the new interface:
 	for example, uses this technique.
 
 	The "address" parameter tells the virtual address where the
-	user will ultimately this page mapped.
+	user will ultimately have this page mapped.
 
 	If D-cache aliasing is not an issue, these two routines may
 	simply call memcpy/memset directly and do nothing more.
Index: drivers/block/rd.c
===================================================================
RCS file: /cvsroot/linuxsh/linux/drivers/block/rd.c,v
retrieving revision 1.1.1.1.2.3
diff -u -3 -p -r1.1.1.1.2.3 rd.c
--- drivers/block/rd.c	2001/12/17 07:26:46	1.1.1.1.2.3
+++ drivers/block/rd.c	2002/01/24 10:09:33
@@ -110,7 +110,7 @@ static struct block_device *rd_bdev[NUM_
  */
 int rd_size = CONFIG_BLK_DEV_RAM_SIZE;		/* Size of the RAM disks */
 /*
- * It would be very desiderable to have a soft-blocksize (that in the case
+ * It would be very desirable to have a soft-blocksize (that in the case
  * of the ramdisk driver is also the hardblocksize ;) of PAGE_SIZE because
 * doing that we'll achieve a far better MM footprint. Using a rd_blocksize of
 * BLOCK_SIZE in the worst case we'll make PAGE_SIZE/BLOCK_SIZE buffer-pages
Index: drivers/net/8139too.c
===================================================================
RCS file: /cvsroot/linuxsh/linux/drivers/net/8139too.c,v
retrieving revision 1.1.1.1.2.3
diff -u -3 -p -r1.1.1.1.2.3 8139too.c
--- drivers/net/8139too.c	2001/11/30 23:03:54	1.1.1.1.2.3
+++ drivers/net/8139too.c	2002/01/24 10:09:33
@@ -1659,23 +1659,24 @@ static int rtl8139_start_xmit (struct sk
 	struct rtl8139_private *tp = dev->priv;
 	void *ioaddr = tp->mmio_addr;
 	unsigned int entry;
+	unsigned int len = skb->len;
 
 	/* Calculate the next Tx descriptor entry. */
 	entry = tp->cur_tx % NUM_TX_DESC;
 
-	if (likely(skb->len < TX_BUF_SIZE)) {
+	if (likely(len < TX_BUF_SIZE)) {
 		skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
 		dev_kfree_skb(skb);
 	} else {
 		dev_kfree_skb(skb);
 		tp->stats.tx_dropped++;
 		return 0;
-	}
+	}
 
 	/* Note: the chip doesn't have auto-pad! */
 	spin_lock_irq(&tp->lock);
 	RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
-		   tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+		   tp->tx_flag | max(len, (unsigned int) ETH_ZLEN));
 
 	dev->trans_start = jiffies;
 
@@ -1687,7 +1688,7 @@ static int rtl8139_start_xmit (struct sk
 	spin_unlock_irq(&tp->lock);
 
 	DPRINTK ("%s: Queued Tx packet at %p size %u to slot %d.\n",
-		 dev->name, skb->data, skb->len, entry);
+		 dev->name, skb->data, len, entry);
 
 	return 0;
 }
@@ -2516,7 +2517,7 @@ static struct pci_driver rtl8139_pci_dri
 	name:		DRV_NAME,
 	id_table:	rtl8139_pci_tbl,
 	probe:		rtl8139_init_one,
-	remove:		rtl8139_remove_one,
+	remove:		__devexit_p(rtl8139_remove_one),
 #ifdef CONFIG_PM
 	suspend:	rtl8139_suspend,
 	resume:		rtl8139_resume,
Index: include/linux/highmem.h
===================================================================
RCS file: /cvsroot/linuxsh/linux/include/linux/highmem.h,v
retrieving revision 1.1.1.1.2.1
diff -u -3 -p -r1.1.1.1.2.1 highmem.h
--- include/linux/highmem.h	2001/12/17 07:26:46	1.1.1.1.2.1
+++ include/linux/highmem.h	2002/01/24 10:09:33
@@ -93,15 +93,4 @@ static inline void copy_user_highpage(st
 	kunmap_atomic(vto, KM_USER1);
 }
 
-static inline void copy_highpage(struct page *to, struct page *from)
-{
-	char *vfrom, *vto;
-
-	vfrom = kmap(from);
-	vto = kmap(to);
-	copy_page(vto, vfrom);
-	kunmap(from);
-	kunmap(to);
-}
-
 #endif /* _LINUX_HIGHMEM_H */
Index: kernel/ptrace.c
===================================================================
RCS file: /cvsroot/linuxsh/linux/kernel/ptrace.c,v
retrieving revision 1.1.1.1.2.1
diff -u -3 -p -r1.1.1.1.2.1 ptrace.c
--- kernel/ptrace.c	2001/11/30 23:03:57	1.1.1.1.2.1
+++ kernel/ptrace.c	2002/01/24 10:09:33
@@ -121,120 +121,17 @@ int ptrace_detach(struct task_struct *ch
 }
 
 /*
- * Access another process' address space, one page at a time.
+ * Access another process' address space.
+ * Source/target buffer must be kernel space,
+ * Do not walk the page table directly, use get_user_pages
 */
-static int access_one_page(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long addr, void *buf, int len, int write)
-{
-	pgd_t * pgdir;
-	pmd_t * pgmiddle;
-	pte_t * pgtable;
-	char *maddr;
-	struct page *page;
-
-repeat:
-	spin_lock(&mm->page_table_lock);
-	pgdir = pgd_offset(vma->vm_mm, addr);
-	if (pgd_none(*pgdir))
-		goto fault_in_page;
-	if (pgd_bad(*pgdir))
-		goto bad_pgd;
-	pgmiddle = pmd_offset(pgdir, addr);
-	if (pmd_none(*pgmiddle))
-		goto fault_in_page;
-	if (pmd_bad(*pgmiddle))
-		goto bad_pmd;
-	pgtable = pte_offset(pgmiddle, addr);
-	if (!pte_present(*pgtable))
-		goto fault_in_page;
-	if (write && (!pte_write(*pgtable) || !pte_dirty(*pgtable)))
-		goto fault_in_page;
-	page = pte_page(*pgtable);
-
-	/* ZERO_PAGE is special: reads from it are ok even though it's marked reserved */
-	if (page != ZERO_PAGE(addr) || write) {
-		if ((!VALID_PAGE(page)) || PageReserved(page)) {
-			spin_unlock(&mm->page_table_lock);
-			return 0;
-		}
-	}
-	get_page(page);
-	spin_unlock(&mm->page_table_lock);
-	flush_cache_page(vma, addr);
-
-	if (write) {
-		maddr = kmap(page);
-		memcpy(maddr + (addr & ~PAGE_MASK), buf, len);
-		flush_dcache_page(page);
-		flush_page_to_ram(page);
-		flush_icache_page(vma, page);
-		kunmap(page);
-	} else {
-		maddr = kmap(page);
-		memcpy(buf, maddr + (addr & ~PAGE_MASK), len);
-		flush_page_to_ram(page);
-		kunmap(page);
-	}
-	put_page(page);
-	return len;
-
-fault_in_page:
-	spin_unlock(&mm->page_table_lock);
-	/* -1: out of memory. 0 - unmapped page */
-	if (handle_mm_fault(mm, vma, addr, write) > 0)
-		goto repeat;
-	return 0;
-
-bad_pgd:
-	spin_unlock(&mm->page_table_lock);
-	pgd_ERROR(*pgdir);
-	return 0;
-
-bad_pmd:
-	spin_unlock(&mm->page_table_lock);
-	pmd_ERROR(*pgmiddle);
-	return 0;
-}
-
-static int access_mm(struct mm_struct *mm, struct vm_area_struct * vma, unsigned long addr, void *buf, int len, int write)
-{
-	int copied = 0;
-
-	for (;;) {
-		unsigned long offset = addr & ~PAGE_MASK;
-		int this_len = PAGE_SIZE - offset;
-		int retval;
-
-		if (this_len > len)
-			this_len = len;
-		retval = access_one_page(mm, vma, addr, buf, this_len, write);
-		copied += retval;
-		if (retval != this_len)
-			break;
-
-		len -= retval;
-		if (!len)
-			break;
-
-		addr += retval;
-		buf += retval;
-
-		if (addr < vma->vm_end)
-			continue;
-		if (!vma->vm_next)
-			break;
-		if (vma->vm_next->vm_start != vma->vm_end)
-			break;
-
-		vma = vma->vm_next;
-	}
-	return copied;
-}
-
 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 {
-	int copied;
 	struct mm_struct *mm;
-	struct vm_area_struct * vma;
+	struct vm_area_struct *vma;
+	struct page *page;
+	void *old_buf = buf;
 
 	/* Worry about races with exit() */
 	task_lock(tsk);
@@ -246,14 +143,42 @@ int access_process_vm(struct task_struct
 		return 0;
 
 	down_read(&mm->mmap_sem);
-	vma = find_extend_vma(mm, addr);
-	copied = 0;
-	if (vma)
-		copied = access_mm(mm, vma, addr, buf, len, write);
+	/* ignore errors, just check how much was sucessfully transfered */
+	while (len) {
+		int bytes, ret, offset;
+		void *maddr;
+
+		ret = get_user_pages(current, mm, addr, 1,
+				write, 1, &page, &vma);
+		if (ret <= 0)
+			break;
+
+		bytes = len;
+		offset = addr & (PAGE_SIZE-1);
+		if (bytes > PAGE_SIZE-offset)
+			bytes = PAGE_SIZE-offset;
+
+		flush_cache_page(vma, addr);
+		maddr = kmap(page);
+		if (write) {
+			memcpy(maddr + offset, buf, bytes);
+			flush_dcache_page(page);
+			flush_page_to_ram(page);
+			flush_icache_page(vma, page);
+		} else {
+			memcpy(buf, maddr + offset, bytes);
+			flush_page_to_ram(page);
+		}
+		kunmap(page);
+		put_page(page);
+		len -= bytes;
+		buf += bytes;
+	}
 
 	up_read(&mm->mmap_sem);
 	mmput(mm);
-	return copied;
+
+	return buf - old_buf;
 }
 
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len)
Index: mm/memory.c
===================================================================
RCS file: /cvsroot/linuxsh/linux/mm/memory.c,v
retrieving revision 1.1.1.1.2.3
diff -u -3 -p -r1.1.1.1.2.3 memory.c
--- mm/memory.c	2001/12/12 00:11:59	1.1.1.1.2.3
+++ mm/memory.c	2002/01/24 10:09:33
@@ -397,17 +397,16 @@ void zap_page_range(struct mm_struct *mm
 	spin_unlock(&mm->page_table_lock);
 }
 
-
 /*
  * Do a quick page-table lookup for a single page.
 */
-static struct page * follow_page(unsigned long address, int write)
+static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *ptep, pte;
 
-	pgd = pgd_offset(current->mm, address);
+	pgd = pgd_offset(mm, address);
 	if (pgd_none(*pgd) || pgd_bad(*pgd))
 		goto out;
@@ -443,21 +442,74 @@ static inline struct page * get_page_map
 	return page;
 }
 
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
+		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas)
+{
+	int i = 0;
+
+	do {
+		struct vm_area_struct * vma;
+
+		vma = find_extend_vma(mm, start);
+
+		if ( !vma ||
+		    (!force &&
+		     ((write && (!(vma->vm_flags & VM_WRITE))) ||
+		      (!write && (!(vma->vm_flags & VM_READ))) ) )) {
+			if (i) return i;
+			return -EFAULT;
+		}
+
+		spin_lock(&mm->page_table_lock);
+		do {
+			struct page *map;
+			while (!(map = follow_page(mm, start, write))) {
+				spin_unlock(&mm->page_table_lock);
+				switch (handle_mm_fault(mm, vma, start, write)) {
+				case 1:
+					tsk->min_flt++;
+					break;
+				case 2:
+					tsk->maj_flt++;
+					break;
+				case 0:
+					if (i) return i;
+					return -EFAULT;
+				default:
+					if (i) return i;
+					return -ENOMEM;
+				}
+				spin_lock(&mm->page_table_lock);
+			}
+			if (pages) {
+				pages[i] = get_page_map(map);
+				/* FIXME: call the correct function,
+				 * depending on the type of the found page
+				 */
+				if (pages[i])
+					page_cache_get(pages[i]);
+			}
+			if (vmas)
+				vmas[i] = vma;
+			i++;
+			start += PAGE_SIZE;
+			len--;
+		} while(len && start < vma->vm_end);
+		spin_unlock(&mm->page_table_lock);
+	} while(len);
+	return i;
+}
+
 /*
  * Force in an entire range of pages from the current process's user VA,
  * and pin them in physical memory.
 */
-
 #define dprintk(x...)
+
 int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
 {
-	unsigned long		ptr, end;
-	int			err;
+	int pgcount, err;
 	struct mm_struct *	mm;
-	struct vm_area_struct *	vma = 0;
-	struct page *		map;
-	int			i;
-	int			datain = (rw == READ);
 
 	/* Make sure the iobuf is not already mapped somewhere. */
 	if (iobuf->nr_pages)
@@ -466,79 +518,37 @@ int map_user_kiobu
 	mm = current->mm;
 	dprintk ("map_user_kiobuf: begin\n");
 
-	ptr = va & PAGE_MASK;
-	end = (va + len + PAGE_SIZE - 1) & PAGE_MASK;
-	err = expand_kiobuf(iobuf, (end - ptr) >> PAGE_SHIFT);
+	pgcount = (va + len + PAGE_SIZE - 1)/PAGE_SIZE - va/PAGE_SIZE;
+	/* mapping 0 bytes is not permitted */
+	if (!pgcount) BUG();
+	err = expand_kiobuf(iobuf, pgcount);
 	if (err)
 		return err;
 
-	down_read(&mm->mmap_sem);
-
-	err = -EFAULT;
 	iobuf->locked = 0;
-	iobuf->offset = va & ~PAGE_MASK;
+	iobuf->offset = va & (PAGE_SIZE-1);
 	iobuf->length = len;
 
-	i = 0;
-
-	/*
-	 * First of all, try to fault in all of the necessary pages
-	 */
-	while (ptr < end) {
-		if (!vma || ptr >= vma->vm_end) {
-			vma = find_vma(current->mm, ptr);
-			if (!vma)
-				goto out_unlock;
-			if (vma->vm_start > ptr) {
-				if (!(vma->vm_flags & VM_GROWSDOWN))
-					goto out_unlock;
-				if (expand_stack(vma, ptr))
-					goto out_unlock;
-			}
-			if (((datain) && (!(vma->vm_flags & VM_WRITE))) ||
-					(!(vma->vm_flags & VM_READ))) {
-				err = -EACCES;
-				goto out_unlock;
-			}
-		}
-		spin_lock(&mm->page_table_lock);
-		while (!(map = follow_page(ptr, datain))) {
-			int ret;
-
-			spin_unlock(&mm->page_table_lock);
-			ret = handle_mm_fault(current->mm, vma, ptr, datain);
-			if (ret <= 0) {
-				if (!ret)
-					goto out_unlock;
-				else {
-					err = -ENOMEM;
-					goto out_unlock;
-				}
-			}
-			spin_lock(&mm->page_table_lock);
-		}
-		map = get_page_map(map);
-		if (map) {
-			flush_dcache_page(map);
-			page_cache_get(map);
-		} else
-			printk (KERN_INFO "Mapped page missing [%d]\n", i);
-		spin_unlock(&mm->page_table_lock);
-		iobuf->maplist[i] = map;
-		iobuf->nr_pages = ++i;
-
-		ptr += PAGE_SIZE;
-	}
-
+	/* Try to fault in all of the necessary pages */
+	down_read(&mm->mmap_sem);
+	/* rw==READ means read from disk, write into memory area */
+	err = get_user_pages(current, mm, va, pgcount,
+			(rw==READ), 0, iobuf->maplist, NULL);
 	up_read(&mm->mmap_sem);
+	if (err < 0) {
+		unmap_kiobuf(iobuf);
+		dprintk ("map_user_kiobuf: end %d\n", err);
+		return err;
+	}
+	iobuf->nr_pages = err;
+	while (pgcount--) {
+		/* FIXME: flush superflous for rw==READ,
+		 * probably wrong function for rw==WRITE
+		 */
+		flush_dcache_page(iobuf->maplist[pgcount]);
+	}
 	dprintk ("map_user_kiobuf: end OK\n");
 	return 0;
-
- out_unlock:
-	up_read(&mm->mmap_sem);
-	unmap_kiobuf(iobuf);
-	dprintk ("map_user_kiobuf: end %d\n", err);
-	return err;
 }
@@ -588,6 +598,9 @@ void unmap_kiobuf (struct kiobuf *iobuf)
 		if (map) {
 			if (iobuf->locked)
 				UnlockPage(map);
+			/* FIXME: cache flush missing for rw==READ
+			 * FIXME: call the correct reference counting function
+			 */
 			page_cache_release(map);
 		}
 	}
@@ -1026,16 +1039,10 @@ out_unlock:
 
 do_expand:
 	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
-	if (limit != RLIM_INFINITY) {
-		if (inode->i_size >= limit) {
-			send_sig(SIGXFSZ, current, 0);
-			goto out;
-		}
-		if (offset > limit) {
-			send_sig(SIGXFSZ, current, 0);
-			offset = limit;
-		}
-	}
+	if (limit != RLIM_INFINITY && offset > limit)
+		goto out_sig;
+	if (offset > inode->i_sb->s_maxbytes)
+		goto out;
 	inode->i_size = offset;
 
 out_truncate:
@@ -1044,8 +1051,11 @@ out_truncate:
 		inode->i_op->truncate(inode);
 		unlock_kernel();
 	}
-out:
 	return 0;
+out_sig:
+	send_sig(SIGXFSZ, current, 0);
+out:
+	return -EFBIG;
 }
 
 /*
@@ -1108,6 +1118,8 @@ static int do_swap_page(struct mm_struct
 		ret = 2;
 	}
 
+	mark_page_accessed(page);
+
 	lock_page(page);
 
 	/*
@@ -1178,6 +1190,7 @@ static int do_anonymous_page(struct mm_s
 		flush_page_to_ram(page);
 		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 		lru_cache_add(page);
+		mark_page_accessed(page);
 	}
 
 	set_pte(page_table, entry);
@@ -1225,9 +1238,11 @@ static int do_no_page(struct mm_struct *
 	 */
 	if (write_access && !(vma->vm_flags & VM_SHARED)) {
 		struct page * page = alloc_page(GFP_HIGHUSER);
-		if (!page)
+		if (!page) {
+			page_cache_release(new_page);
 			return -1;
-		copy_highpage(page, new_page);
+		}
+		copy_user_highpage(page, new_page, address);
 		page_cache_release(new_page);
 		lru_cache_add(page);
 		new_page = page;
@@ -1416,23 +1431,19 @@ out:
 	return pte_offset(pmd, address);
 }
 
-/*
- * Simplistic page force-in..
- */
 int make_pages_present(unsigned long addr, unsigned long end)
 {
-	int write;
-	struct mm_struct *mm = current->mm;
+	int ret, len, write;
 	struct vm_area_struct * vma;
 
-	vma = find_vma(mm, addr);
+	vma = find_vma(current->mm, addr);
 	write = (vma->vm_flags & VM_WRITE) != 0;
 	if (addr >= end)
 		BUG();
-	do {
-		if (handle_mm_fault(mm, vma, addr, write) < 0)
-			return -1;
-		addr += PAGE_SIZE;
-	} while (addr < end);
-	return 0;
+	if (end > vma->vm_end)
+		BUG();
+	len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
+	ret = get_user_pages(current, current->mm, addr,
+			len, write, 0, NULL, NULL);
+	return ret == len ? 0 : -1;
 }
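
For reference, the heart of this backport is the get_user_pages() interface
added to mm/memory.c above; access_process_vm(), map_user_kiobuf() and
make_pages_present() are all rewritten on top of it. Below is a minimal
sketch of how a driver would pin and release user pages with it. This is an
illustration only, not part of the patch: the helper names pin_user_buffer()
and unpin_user_buffer() and the count nr are invented here, and it assumes
the get_user_pages() prototype lands in include/linux/mm.h as in mainline
2.4.17.

	/* Illustration only -- not part of the patch above. */
	#include <linux/mm.h>
	#include <linux/sched.h>
	#include <linux/pagemap.h>

	static int pin_user_buffer(unsigned long start, int nr,
				   struct page **pages)
	{
		int ret;

		/* get_user_pages() is called with mmap_sem held for
		 * reading, as access_process_vm() does above.
		 */
		down_read(&current->mm->mmap_sem);
		/* write=0: we only read the buffer; force=0: honour the
		 * VM_READ/VM_WRITE checks inside get_user_pages().
		 */
		ret = get_user_pages(current, current->mm, start,
				     nr, 0, 0, pages, NULL);
		up_read(&current->mm->mmap_sem);

		return ret;	/* pages pinned, or -EFAULT/-ENOMEM */
	}

	static void unpin_user_buffer(struct page **pages, int nr)
	{
		/* Each page was grabbed with page_cache_get(), so drop
		 * the references the same way unmap_kiobuf() does.
		 */
		while (nr--)
			page_cache_release(pages[nr]);
	}

Note that get_user_pages() can succeed partially and return fewer pages than
requested, so a caller that needs the whole range has to compare the return
value against nr, the way make_pages_present() does.

--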