[lc-checkins] CVS: linux/fs buffer.c,1.17,1.18 inode.c,1.3,1.4
From: Rodrigo S. de C. <rc...@us...> - 2003-05-19 01:39:20
Update of /cvsroot/linuxcompressed/linux/fs
In directory sc8-pr-cvs1:/tmp/cvs-serv25395/fs

Modified Files:
	buffer.c inode.c
Log Message:
o Port code to 2.4.20

Bug fix (?)
o Changes checks in vswap.c to avoid oopses. It will BUG() instead.
  Some of the checks were done after the value had been accessed.

Note
o Virtual swap addresses are temporarily disabled, due to debugging
  sessions related to the use of swap files instead of swap partitions.

Index: buffer.c
===================================================================
RCS file: /cvsroot/linuxcompressed/linux/fs/buffer.c,v
retrieving revision 1.17
retrieving revision 1.18
diff -C2 -r1.17 -r1.18
*** buffer.c	29 Nov 2002 21:23:02 -0000	1.17
--- buffer.c	19 May 2003 01:38:46 -0000	1.18
*************** *** 55,59 **** #include <asm/mmu_context.h> - #define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512) #define NR_RESERVED (10*MAX_BUF_PER_PAGE) #define MAX_UNUSED_BUFFERS NR_RESERVED+20 /* don't ever have more than this --- 55,58 ---- *************** *** 75,79 **** static struct buffer_head *lru_list[NR_LIST]; ! static spinlock_t lru_list_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; static int nr_buffers_type[NR_LIST]; static unsigned long size_buffers_type[NR_LIST]; --- 74,81 ---- static struct buffer_head *lru_list[NR_LIST]; ! ! static spinlock_cacheline_t lru_list_lock_cacheline = {SPIN_LOCK_UNLOCKED}; ! #define lru_list_lock lru_list_lock_cacheline.lock ! static int nr_buffers_type[NR_LIST]; static unsigned long size_buffers_type[NR_LIST]; *************** *** 85,88 **** --- 87,91 ---- static int grow_buffers(kdev_t dev, unsigned long block, int size); + static int osync_buffers_list(struct list_head *); static void __refile_buffer(struct buffer_head *); *************** *** 104,108 **** int nfract; /* Percentage of buffer cache dirty to activate bdflush */ ! int dummy1; /* old "ndirty" */ int dummy2; /* old "nrefill" */ int dummy3; /* unused */ --- 107,112 ---- int nfract; /* Percentage of buffer cache dirty to activate bdflush */ ! int ndirty; /* Maximum number of dirty blocks to write out per ! wake-cycle */ int dummy2; /* old "nrefill" */ int dummy3; /* unused */ *************** *** 111,128 **** int nfract_sync;/* Percentage of buffer cache dirty to activate bdflush synchronously */ ! int dummy4; /* unused */ int dummy5; /* unused */ } b_un; unsigned int data[N_PARAM]; ! } bdf_prm = {{40, 0, 0, 0, 5*HZ, 30*HZ, 60, 0, 0}}; /* These are the min and max parameter values that we will allow to be assigned */ ! int bdflush_min[N_PARAM] = { 0, 10, 5, 25, 0, 1*HZ, 0, 0, 0}; ! int bdflush_max[N_PARAM] = {100,50000, 20000, 20000,10000*HZ, 6000*HZ, 100, 0, 0}; void unlock_buffer(struct buffer_head *bh) { clear_bit(BH_Wait_IO, &bh->b_state); ! clear_bit(BH_launder, &bh->b_state); clear_bit(BH_Lock, &bh->b_state); smp_mb__after_clear_bit(); --- 115,139 ---- int nfract_sync;/* Percentage of buffer cache dirty to activate bdflush synchronously */ ! int nfract_stop_bdflush; /* Percetange of buffer cache dirty to stop bdflush */ int dummy5; /* unused */ } b_un; unsigned int data[N_PARAM]; ! } bdf_prm = {{30, 500, 0, 0, 5*HZ, 30*HZ, 60, 20, 0}}; /* These are the min and max parameter values that we will allow to be assigned */ ! int bdflush_min[N_PARAM] = { 0, 1, 0, 0, 0, 1*HZ, 0, 0, 0}; ! int bdflush_max[N_PARAM] = {100,50000, 20000, 20000,10000*HZ, 10000*HZ, 100, 100, 0}; void unlock_buffer(struct buffer_head *bh) { clear_bit(BH_Wait_IO, &bh->b_state); ! clear_bit(BH_Launder, &bh->b_state); ! /* !
* When a locked buffer is visible to the I/O layer BH_Launder ! * is set. This means before unlocking we must clear BH_Launder, ! * mb() on alpha and then clear BH_Lock, so no reader can see ! * BH_Launder set on an unlocked buffer and then risk to deadlock. ! */ ! smp_mb__after_clear_bit(); clear_bit(BH_Lock, &bh->b_state); smp_mb__after_clear_bit(); *************** *** 132,142 **** /* - * Rewrote the wait-routines to use the "new" wait-queue functionality, - * and getting rid of the cli-sti pairs. The wait-queue routines still - * need cli-sti, but now it's just a couple of 386 instructions or so. - * * Note that the real wait_on_buffer() is an inline function that checks ! * if 'b_wait' is set before calling this, so that the queues aren't set ! * up unnecessarily. */ void __wait_on_buffer(struct buffer_head * bh) --- 143,149 ---- /* * Note that the real wait_on_buffer() is an inline function that checks ! * that the buffer is locked before calling this, so that unnecessary disk ! * unplugging does not occur. */ void __wait_on_buffer(struct buffer_head * bh) *************** *** 204,208 **** next = bh->b_next_free; ! if (dev && bh->b_dev != dev) continue; if (test_and_set_bit(BH_Lock, &bh->b_state)) --- 211,215 ---- next = bh->b_next_free; ! if (dev != NODEV && bh->b_dev != dev) continue; if (test_and_set_bit(BH_Lock, &bh->b_state)) *************** *** 234,241 **** static void write_unlocked_buffers(kdev_t dev) { ! do { spin_lock(&lru_list_lock); ! } while (write_some_buffers(dev)); ! run_task_queue(&tq_disk); } --- 241,247 ---- static void write_unlocked_buffers(kdev_t dev) { ! do spin_lock(&lru_list_lock); ! while (write_some_buffers(dev)); } *************** *** 262,266 **** continue; } ! if (dev && bh->b_dev != dev) continue; --- 268,272 ---- continue; } ! if (dev != NODEV && bh->b_dev != dev) continue; *************** *** 275,284 **** } - static inline void wait_for_some_buffers(kdev_t dev) - { - spin_lock(&lru_list_lock); - wait_for_buffers(dev, BUF_LOCKED, 1); - } - static int wait_for_locked_buffers(kdev_t dev, int index, int refile) { --- 281,284 ---- *************** *** 731,743 **** static void free_more_memory(void) { - zone_t * zone = contig_page_data.node_zonelists[GFP_NOFS & GFP_ZONEMASK].zones[0]; - balance_dirty(); wakeup_bdflush(); ! try_to_free_pages(zone, GFP_NOFS, 0); run_task_queue(&tq_disk); ! current->policy |= SCHED_YIELD; ! __set_current_state(TASK_RUNNING); ! schedule(); } --- 731,739 ---- static void free_more_memory(void) { balance_dirty(); wakeup_bdflush(); ! try_to_free_pages(GFP_NOIO); run_task_queue(&tq_disk); ! yield(); } *************** *** 755,758 **** --- 751,755 ---- struct buffer_head *tmp; struct page *page; + int fullup = 1; mark_buffer_uptodate(bh, uptodate); *************** *** 781,786 **** tmp = bh->b_this_page; while (tmp != bh) { ! if (buffer_async(tmp) && buffer_locked(tmp)) ! goto still_busy; tmp = tmp->b_this_page; } --- 778,786 ---- tmp = bh->b_this_page; while (tmp != bh) { ! if (buffer_locked(tmp)) { ! if (buffer_async(tmp)) ! goto still_busy; ! } else if (!buffer_uptodate(tmp)) ! fullup = 0; tmp = tmp->b_this_page; } *************** *** 790,797 **** /* ! * if none of the buffers had errors then we can set the ! * page uptodate: */ ! if (!PageError(page)) SetPageUptodate(page); --- 790,797 ---- /* ! * If none of the buffers had errors and all were uptodate ! * then we can set the page uptodate: */ ! if (fullup && !PageError(page)) SetPageUptodate(page); *************** *** 805,811 **** } ! 
inline void set_buffer_async_io(struct buffer_head *bh) { ! bh->b_end_io = end_buffer_io_async ; ! mark_buffer_async(bh, 1); } --- 805,812 ---- } ! inline void set_buffer_async_io(struct buffer_head *bh) ! { ! bh->b_end_io = end_buffer_io_async; ! mark_buffer_async(bh, 1); } *************** *** 829,834 **** * any newly dirty buffers for write. */ ! ! int fsync_inode_buffers(struct inode *inode) { struct buffer_head *bh; --- 830,834 ---- * any newly dirty buffers for write. */ ! int fsync_buffers_list(struct list_head *list) { struct buffer_head *bh; *************** *** 840,845 **** spin_lock(&lru_list_lock); ! while (!list_empty(&inode->i_dirty_buffers)) { ! bh = BH_ENTRY(inode->i_dirty_buffers.next); list_del(&bh->b_inode_buffers); if (!buffer_dirty(bh) && !buffer_locked(bh)) --- 840,845 ---- spin_lock(&lru_list_lock); ! while (!list_empty(list)) { ! bh = BH_ENTRY(list->next); list_del(&bh->b_inode_buffers); if (!buffer_dirty(bh) && !buffer_locked(bh)) *************** *** 851,854 **** --- 851,863 ---- get_bh(bh); spin_unlock(&lru_list_lock); + /* + * Wait I/O completion before submitting + * the buffer, to be sure the write will + * be effective on the latest data in + * the buffer. (otherwise - if there's old + * I/O in flight - write_buffer would become + * a noop) + */ + wait_on_buffer(bh); ll_rw_block(WRITE, 1, &bh); brelse(bh); *************** *** 871,924 **** spin_unlock(&lru_list_lock); ! err2 = osync_inode_buffers(inode); ! ! if (err) ! return err; ! else ! return err2; ! } ! ! int fsync_inode_data_buffers(struct inode *inode) ! { ! struct buffer_head *bh; ! struct inode tmp; ! int err = 0, err2; ! ! INIT_LIST_HEAD(&tmp.i_dirty_data_buffers); ! ! spin_lock(&lru_list_lock); ! ! while (!list_empty(&inode->i_dirty_data_buffers)) { ! bh = BH_ENTRY(inode->i_dirty_data_buffers.next); ! list_del(&bh->b_inode_buffers); ! if (!buffer_dirty(bh) && !buffer_locked(bh)) ! bh->b_inode = NULL; ! else { ! bh->b_inode = &tmp; ! list_add(&bh->b_inode_buffers, &tmp.i_dirty_data_buffers); ! if (buffer_dirty(bh)) { ! get_bh(bh); ! spin_unlock(&lru_list_lock); ! ll_rw_block(WRITE, 1, &bh); ! brelse(bh); ! spin_lock(&lru_list_lock); ! } ! } ! } ! ! while (!list_empty(&tmp.i_dirty_data_buffers)) { ! bh = BH_ENTRY(tmp.i_dirty_data_buffers.prev); ! remove_inode_queue(bh); ! get_bh(bh); ! spin_unlock(&lru_list_lock); ! wait_on_buffer(bh); ! if (!buffer_uptodate(bh)) ! err = -EIO; ! brelse(bh); ! spin_lock(&lru_list_lock); ! } ! ! spin_unlock(&lru_list_lock); ! err2 = osync_inode_data_buffers(inode); if (err) --- 880,884 ---- spin_unlock(&lru_list_lock); ! err2 = osync_buffers_list(list); if (err) *************** *** 934,975 **** * * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as ! * you dirty the buffers, and then use osync_inode_buffers to wait for * completion. Any other dirty buffers which are not yet queued for * write will not be flushed to disk by the osync. */ ! ! int osync_inode_buffers(struct inode *inode) ! { ! struct buffer_head *bh; ! struct list_head *list; ! int err = 0; ! ! spin_lock(&lru_list_lock); ! ! repeat: ! ! for (list = inode->i_dirty_buffers.prev; ! bh = BH_ENTRY(list), list != &inode->i_dirty_buffers; ! list = bh->b_inode_buffers.prev) { ! if (buffer_locked(bh)) { ! get_bh(bh); ! spin_unlock(&lru_list_lock); ! wait_on_buffer(bh); ! if (!buffer_uptodate(bh)) ! err = -EIO; ! brelse(bh); ! spin_lock(&lru_list_lock); ! goto repeat; ! } ! } ! ! spin_unlock(&lru_list_lock); ! return err; ! } ! ! 
int osync_inode_data_buffers(struct inode *inode) { struct buffer_head *bh; ! struct list_head *list; int err = 0; --- 894,905 ---- * * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as ! * you dirty the buffers, and then use osync_buffers_list to wait for * completion. Any other dirty buffers which are not yet queued for * write will not be flushed to disk by the osync. */ ! static int osync_buffers_list(struct list_head *list) { struct buffer_head *bh; ! struct list_head *p; int err = 0; *************** *** 977,984 **** repeat: ! ! for (list = inode->i_dirty_data_buffers.prev; ! bh = BH_ENTRY(list), list != &inode->i_dirty_data_buffers; ! list = bh->b_inode_buffers.prev) { if (buffer_locked(bh)) { get_bh(bh); --- 907,912 ---- repeat: ! list_for_each_prev(p, list) { ! bh = BH_ENTRY(p); if (buffer_locked(bh)) { get_bh(bh); *************** *** 997,1001 **** } - /* * Invalidate any and all dirty buffers on a given inode. We are --- 925,928 ---- *************** *** 1032,1037 **** bh = get_hash_table(dev, block, size); ! if (bh) return bh; if (!grow_buffers(dev, block, size)) --- 959,966 ---- bh = get_hash_table(dev, block, size); ! if (bh) { ! touch_buffer(bh); return bh; + } if (!grow_buffers(dev, block, size)) *************** *** 1048,1052 **** dirty = size_buffers_type[BUF_DIRTY] >> PAGE_SHIFT; - dirty += size_buffers_type[BUF_LOCKED] >> PAGE_SHIFT; tot = nr_free_buffer_pages(); --- 977,980 ---- *************** *** 1065,1068 **** --- 993,1011 ---- } + static int bdflush_stop(void) + { + unsigned long dirty, tot, dirty_limit; + + dirty = size_buffers_type[BUF_DIRTY] >> PAGE_SHIFT; + tot = nr_free_buffer_pages(); + + dirty *= 100; + dirty_limit = tot * bdf_prm.b_un.nfract_stop_bdflush; + + if (dirty > dirty_limit) + return 0; + return 1; + } + /* * if a new dirty buffer is created we need to balance bdflush. *************** *** 1079,1095 **** return; ! /* If we're getting into imbalance, start write-out */ ! spin_lock(&lru_list_lock); ! write_some_buffers(NODEV); /* * And if we're _really_ out of balance, wait for ! * some of the dirty/locked buffers ourselves and ! * start bdflush. * This will throttle heavy writers. */ if (state > 0) { ! wait_for_some_buffers(NODEV); ! wakeup_bdflush(); } } --- 1022,1035 ---- return; ! wakeup_bdflush(); /* * And if we're _really_ out of balance, wait for ! * some of the dirty/locked buffers ourselves. * This will throttle heavy writers. */ if (state > 0) { ! spin_lock(&lru_list_lock); ! write_some_buffers(NODEV); } } *************** *** 1185,1189 **** bh = getblk(dev, block, size); - touch_buffer(bh); if (buffer_uptodate(bh)) return bh; --- 1125,1128 ---- *************** *** 1274,1287 **** void set_bh_page (struct buffer_head *bh, struct page *page, unsigned long offset) { - bh->b_page = page; if (offset >= PAGE_SIZE) BUG(); ! if (PageHighMem(page)) ! /* ! * This catches illegal uses and preserves the offset: ! */ ! bh->b_data = (char *)(0 + offset); ! else ! bh->b_data = page_address(page) + offset; } EXPORT_SYMBOL(set_bh_page); --- 1213,1224 ---- void set_bh_page (struct buffer_head *bh, struct page *page, unsigned long offset) { if (offset >= PAGE_SIZE) BUG(); ! ! /* ! * page_address will return NULL anyways for highmem pages ! */ ! bh->b_data = page_address(page) + offset; ! bh->b_page = page; } EXPORT_SYMBOL(set_bh_page); *************** *** 1682,1687 **** --- 1619,1636 ---- * data. If BH_New is set, we know that the block was newly * allocated in the above loop. 
+ * + * Details the buffer can be new and uptodate because: + * 1) hole in uptodate page, get_block(create) allocate the block, + * so the buffer is new and additionally we also mark it uptodate + * 2) The buffer is not mapped and uptodate due a previous partial read. + * + * We can always ignore uptodate buffers here, if you mark a buffer + * uptodate you must make sure it contains the right data first. + * + * We must stop the "undo/clear" fixup pass not at the caller "to" + * but at the last block that we successfully arrived in the main loop. */ bh = head; + to = block_start; /* stop at the last successfully handled block */ block_start = 0; do { *************** *** 1691,1698 **** if (block_start >= to) break; ! if (buffer_new(bh)) { ! if (buffer_uptodate(bh)) ! printk(KERN_ERR "%s: zeroing uptodate buffer!\n", __FUNCTION__); memset(kaddr+block_start, 0, bh->b_size); set_bit(BH_Uptodate, &bh->b_state); mark_buffer_dirty(bh); --- 1640,1646 ---- if (block_start >= to) break; ! if (buffer_new(bh) && !buffer_uptodate(bh)) { memset(kaddr+block_start, 0, bh->b_size); + flush_dcache_page(page); set_bit(BH_Uptodate, &bh->b_state); mark_buffer_dirty(bh); *************** *** 1817,1823 **** /* Stage 3: start the IO */ ! for (i = 0; i < nr; i++) ! submit_bh(READ, arr[i]); ! return 0; } --- 1765,1776 ---- /* Stage 3: start the IO */ ! for (i = 0; i < nr; i++) { ! struct buffer_head * bh = arr[i]; ! if (buffer_uptodate(bh)) ! end_buffer_io_async(bh, 1); ! else ! submit_bh(READ, bh); ! } ! return 0; } *************** *** 2054,2058 **** kunmap(page); ! __mark_buffer_dirty(bh); err = 0; --- 2007,2016 ---- kunmap(page); ! if (!atomic_set_buffer_dirty(bh)) { ! __mark_dirty(bh); ! buffer_insert_inode_data_queue(bh, inode); ! balance_dirty(); ! } ! err = 0; *************** *** 2259,2264 **** * * The kiobuf must already be locked for IO. IO is submitted ! * asynchronously: you need to check page->locked, page->uptodate, and ! * maybe wait on page->wait. * * It is up to the caller to make sure that there are enough blocks --- 2217,2221 ---- * * The kiobuf must already be locked for IO. IO is submitted ! * asynchronously: you need to check page->locked and page->uptodate. * * It is up to the caller to make sure that there are enough blocks *************** *** 2393,2398 **** * Start I/O on a page. * This function expects the page to be locked and may return ! * before I/O is complete. You then have to check page->locked, ! * page->uptodate, and maybe wait on page->wait. * * brw_page() is SMP-safe, although it's being called with the --- 2350,2355 ---- * Start I/O on a page. * This function expects the page to be locked and may return ! * before I/O is complete. You then have to check page->locked ! * and page->uptodate. * * brw_page() is SMP-safe, although it's being called with the *************** *** 2595,2602 **** } static int sync_page_buffers(struct buffer_head *head) { struct buffer_head * bh = head; ! int tryagain = 0; do { --- 2552,2590 ---- } + /* + * The first time the VM inspects a page which has locked buffers, it + * will just mark it as needing waiting upon on the scan of the page LRU. + * BH_Wait_IO is used for this. + * + * The second time the VM visits the page, if it still has locked + * buffers, it is time to start writing them out. (BH_Wait_IO was set). + * + * The third time the VM visits the page, if the I/O hasn't completed + * then it's time to wait upon writeout. BH_Lock and BH_Launder are + * used for this. 
+ * + * There is also the case of buffers which were locked by someone else + * - write(2) callers, bdflush, etc. There can be a huge number of these + * and we don't want to just skip them all and fail the page allocation. + * We want to be able to wait on these buffers as well. + * + * The BH_Launder bit is set in submit_bh() to indicate that I/O is + * underway against the buffer, doesn't matter who started it - we know + * that the buffer will eventually come unlocked, and so it's safe to + * wait on it. + * + * The caller holds the page lock and the caller will free this page + * into current->local_page, so by waiting on the page's buffers the + * caller is guaranteed to obtain this page. + * + * sync_page_buffers() will sort-of return true if all the buffers + * against this page are freeable, so try_to_free_buffers() should + * try to free the page's buffers a second time. This is a bit + * broken for blocksize < PAGE_CACHE_SIZE, but not very importantly. + */ static int sync_page_buffers(struct buffer_head *head) { struct buffer_head * bh = head; ! int tryagain = 1; do { *************** *** 2605,2615 **** /* Don't start IO first time around.. */ ! if (!test_and_set_bit(BH_Wait_IO, &bh->b_state)) continue; /* Second time through we start actively writing out.. */ if (test_and_set_bit(BH_Lock, &bh->b_state)) { ! if (!test_bit(BH_launder, &bh->b_state)) continue; wait_on_buffer(bh); tryagain = 1; --- 2593,2607 ---- /* Don't start IO first time around.. */ ! if (!test_and_set_bit(BH_Wait_IO, &bh->b_state)) { ! tryagain = 0; continue; + } /* Second time through we start actively writing out.. */ if (test_and_set_bit(BH_Lock, &bh->b_state)) { ! if (unlikely(!buffer_launder(bh))) { ! tryagain = 0; continue; + } wait_on_buffer(bh); tryagain = 1; *************** *** 2624,2628 **** __mark_buffer_clean(bh); get_bh(bh); - set_bit(BH_launder, &bh->b_state); bh->b_end_io = end_buffer_io_sync; submit_bh(WRITE, bh); --- 2616,2619 ---- *************** *** 2949,2960 **** complete((struct completion *)startup); for (;;) { CHECK_EMERGENCY_SYNC ! spin_lock(&lru_list_lock); ! if (!write_some_buffers(NODEV) || balance_dirty_state() < 0) { ! wait_for_some_buffers(NODEV); ! interruptible_sleep_on(&bdflush_wait); } } } --- 2940,2966 ---- complete((struct completion *)startup); + /* + * FIXME: The ndirty logic here is wrong. It's supposed to + * send bdflush back to sleep after writing ndirty buffers. + * In fact, the test is wrong so bdflush will in fact + * sleep when bdflush_stop() returns true. + * + * FIXME: If it proves useful to implement ndirty properly, + * then perhaps the value of ndirty should be scaled by the + * amount of memory in the machine. + */ for (;;) { + int ndirty = bdf_prm.b_un.ndirty; + CHECK_EMERGENCY_SYNC ! while (ndirty > 0) { ! spin_lock(&lru_list_lock); ! if (!write_some_buffers(NODEV)) ! break; ! 
ndirty -= NRSYNC; } + if (ndirty > 0 || bdflush_stop()) + interruptible_sleep_on(&bdflush_wait); } } *************** *** 2985,2990 **** for (;;) { - wait_for_some_buffers(NODEV); - /* update interval */ interval = bdf_prm.b_un.interval; --- 2991,2994 ---- *************** *** 3014,3017 **** --- 3018,3022 ---- #endif sync_old_buffers(); + run_task_queue(&tq_disk); } } Index: inode.c =================================================================== RCS file: /cvsroot/linuxcompressed/linux/fs/inode.c,v retrieving revision 1.3 retrieving revision 1.4 diff -C2 -r1.3 -r1.4 *** inode.c 27 Feb 2002 19:58:51 -0000 1.3 --- inode.c 19 May 2003 01:38:46 -0000 1.4 *************** *** 253,257 **** static inline void sync_one(struct inode *inode, int sync) { ! if (inode->i_state & I_LOCK) { __iget(inode); spin_unlock(&inode_lock); --- 253,257 ---- static inline void sync_one(struct inode *inode, int sync) { ! while (inode->i_state & I_LOCK) { __iget(inode); spin_unlock(&inode_lock); *************** *** 259,265 **** iput(inode); spin_lock(&inode_lock); - } else { - __sync_one(inode, sync); } } --- 259,265 ---- iput(inode); spin_lock(&inode_lock); } + + __sync_one(inode, sync); } *************** *** 731,736 **** prune_icache(count); ! kmem_cache_shrink(inode_cachep); ! return 0; } --- 731,735 ---- prune_icache(count); ! return kmem_cache_shrink(inode_cachep); } *************** *** 1158,1162 **** } while (inode_hashtable == NULL && --order >= 0); ! printk("Inode-cache hash table entries: %d (order: %ld, %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); --- 1157,1161 ---- } while (inode_hashtable == NULL && --order >= 0); ! printk(KERN_INFO "Inode cache hash table entries: %d (order: %ld, %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); |
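
The log message mentions that checks in vswap.c used to run only after the value had already been accessed, and that they now BUG() instead of letting the kernel oops. vswap.c itself is not part of this checkin, so the sketch below is only a hypothetical illustration of that pattern: vswap_map, NUM_VSWAP_ENTRIES and vswap_entry_ptr() are made-up names, and the kernel's BUG() is replaced by a small userspace stand-in so the example can actually be compiled and run.

/*
 * Hypothetical sketch of the "check before access, fail loudly" fix the
 * log message describes.  None of these identifiers come from vswap.c.
 */
#include <stdio.h>
#include <stdlib.h>

/* userspace stand-in for the kernel's BUG() macro */
#define BUG() do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); \
                   abort(); } while (0)

#define NUM_VSWAP_ENTRIES 16
static int vswap_map[NUM_VSWAP_ENTRIES];

static int *vswap_entry_ptr(unsigned long offset)
{
	/*
	 * The old ordering read vswap_map[offset] first and sanity checked
	 * offset afterwards, so a corrupt offset could crash before the
	 * check ever ran.  Check first, access second.
	 */
	if (offset >= NUM_VSWAP_ENTRIES)
		BUG();
	return &vswap_map[offset];
}

int main(void)
{
	*vswap_entry_ptr(3) = 42;		/* valid index: fine */
	printf("entry 3 = %d\n", vswap_map[3]);
	*vswap_entry_ptr(999) = 1;		/* bad index: trips BUG() */
	return 0;
}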
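
The retuned bdf_prm defaults in the buffer.c diff give bdflush a hysteresis band: per the field comments, bdflush is activated once more than nfract (now 30) percent of the buffer cache is dirty, writers are throttled synchronously above nfract_sync (60) percent, and the new bdflush_stop() lets bdflush go back to sleep once the dirty fraction falls below nfract_stop_bdflush (20) percent. The following is a minimal userspace model of that percentage arithmetic, not kernel code; the pool size of 1000 pages is an arbitrary assumption standing in for nr_free_buffer_pages().

/*
 * Simplified model of the dirty-buffer thresholds, using the new
 * bdf_prm defaults from this diff.
 */
#include <stdio.h>

#define NFRACT              30	/* start background write-out */
#define NFRACT_SYNC         60	/* throttle the dirtier synchronously */
#define NFRACT_STOP_BDFLUSH 20	/* bdflush may go back to sleep */

int main(void)
{
	unsigned long tot = 1000;	/* assumed free buffer pages */
	unsigned long dirty;

	for (dirty = 0; dirty <= 800; dirty += 100) {
		int wake     = dirty * 100 > tot * NFRACT;
		int throttle = dirty * 100 > tot * NFRACT_SYNC;
		int stop     = dirty * 100 <= tot * NFRACT_STOP_BDFLUSH;

		printf("%3lu%% dirty: wake bdflush=%d throttle writers=%d "
		       "bdflush may sleep=%d\n",
		       dirty * 100 / tot, wake, throttle, stop);
	}
	return 0;
}

The gap between the 30% start threshold and the 20% stop threshold is what keeps bdflush from bouncing in and out of sleep around a single cutoff value.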
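
As a usage note, the nine bdf_prm fields are the same values a 2.4 kernel normally exposes through the bdflush sysctl. Assuming this tree inherits the stock /proc/sys/vm/bdflush interface (the sysctl side is not part of this diff) and HZ == 100, the defaults from the diff could be written back with something like the sketch below; the field order follows the b_un structure: nfract, ndirty, two unused slots, interval, age_buffer, nfract_sync, nfract_stop_bdflush, and a final unused slot.

/*
 * Usage sketch only: assumes the standard 2.4 /proc/sys/vm/bdflush
 * sysctl and HZ == 100 (so 5*HZ = 500 and 30*HZ = 3000).  Run as root.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/bdflush", "w");

	if (!f) {
		perror("/proc/sys/vm/bdflush");
		return EXIT_FAILURE;
	}
	/* nfract ndirty dummy2 dummy3 interval age_buffer
	 * nfract_sync nfract_stop_bdflush dummy5 */
	fprintf(f, "30 500 0 0 500 3000 60 20 0\n");
	return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}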