From: <th...@ke...> - 2009-02-17 08:13:09
|
 linux-core/ttm/ttm_bo.c     |    9 +++++----
 linux-core/ttm/ttm_memory.c |   10 +++++++---
 2 files changed, 12 insertions(+), 7 deletions(-)

New commits:
commit 875a6ae21755bd92e875cb136c58537082320572
Author: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
Date:   Tue Feb 17 09:07:49 2009 +0100

    ttm: Adjust swapping target so that current allocation will succeed.

diff --git a/linux-core/ttm/ttm_memory.c b/linux-core/ttm/ttm_memory.c
index 6478ed8..73c45ed 100644
--- a/linux-core/ttm/ttm_memory.c
+++ b/linux-core/ttm/ttm_memory.c
@@ -42,7 +42,8 @@
  * many threads may try to swap out at any given time.
  */

-static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue)
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
+		       uint64_t extra)
 {
 	int ret;
 	struct ttm_mem_shrink *shrink;
@@ -64,6 +65,9 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue)
 		target = glob->max_memory;
 	}

+	total_target = (extra >= total_target) ? 0: total_target - extra;
+	target = (extra >= target) ? 0: target - extra;
+
 	while (glob->used_memory > target ||
 	       glob->used_total_memory > total_target) {
 		shrink = glob->shrink;
@@ -82,7 +86,7 @@ static void ttm_shrink_work(struct work_struct *work)
 	struct ttm_mem_global *glob =
 	    container_of(work, struct ttm_mem_global, work);

-	ttm_shrink(glob, true);
+	ttm_shrink(glob, true, 0ULL);
 }

 int ttm_mem_global_init(struct ttm_mem_global *glob)
@@ -203,7 +207,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 			return -ENOMEM;
 		if (unlikely(count-- == 0))
 			return -ENOMEM;
-		ttm_shrink(glob, false);
+		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
 	}

 	return 0;

commit e8eb30ad8bf2bbd84155cd27a9dbd5e0b0f5bb36
Author: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
Date:   Tue Feb 17 09:03:05 2009 +0100

    ttm: Fix a spinlock bug.
diff --git a/linux-core/ttm/ttm_bo.c b/linux-core/ttm/ttm_bo.c
index 325f550..6679801 100644
--- a/linux-core/ttm/ttm_bo.c
+++ b/linux-core/ttm/ttm_bo.c
@@ -143,6 +143,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	int ret;

 	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
 		if (use_sequence && bo->seq_valid &&
@@ -154,11 +155,11 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 			return -EBUSY;

 		spin_unlock(&bdev->lru_lock);
-
-		if (ttm_bo_wait_unreserved(bo, interruptible) != 0)
-			return -ERESTART;
-
+		ret = ttm_bo_wait_unreserved(bo, interruptible);
 		spin_lock(&bdev->lru_lock);
+
+		if (unlikely(ret))
+			return ret;
 	}

 	if (use_sequence) {