From: <ke...@ke...> - 2008-06-06 20:00:57
 libdrm/intel/intel_bufmgr_gem.c |  107 ++++++++++++++++++----------------
 linux-core/drm_irq.c            |    1
 linux-core/i915_gem.c           |   43 ++++++++++++++--
 shared-core/i915_dma.c          |  102 +++++++++++++++++++++-----------------
 shared-core/i915_drm.h          |    2
 shared-core/i915_drv.h          |   14 +++++
 6 files changed, 162 insertions(+), 107 deletions(-)

New commits:
commit 9f46c6935d154743162c6239903a4a9e443907bc
Author: Keith Packard <ke...@ke...>
Date:   Fri Jun 6 12:59:52 2008 -0700

    [intel-gem] Use timers to retire requests periodically.

    Without the user IRQ running constantly, there's no wakeup when the
    ring empties to go retire requests and free buffers. Use a 1 second
    timer to make that happen more often.

diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index 8f27d7f..318d9d7 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -124,6 +124,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 
 	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
 		    (unsigned long)dev);
+	init_timer_deferrable(&dev->vblank_disable_timer);
 	spin_lock_init(&dev->vbl_lock);
 	atomic_set(&dev->vbl_signal_pending, 0);
 	dev->num_crtcs = num_crtcs;
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
index 14e57b4..abc929e 100644
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@ -162,6 +162,9 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
 	request->flush_domains = flush_domains;
+	if (list_empty(&dev_priv->mm.request_list))
+		mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
+
 	list_add_tail(&request->list, &dev_priv->mm.request_list);
 
 	return seqno;
@@ -300,6 +303,32 @@ i915_gem_retire_requests(struct drm_device *dev)
 	}
 }
 
+void
+i915_gem_retire_timeout(unsigned long data)
+{
+	struct drm_device *dev = (struct drm_device *) data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	schedule_work(&dev_priv->mm.retire_task);
+}
+
+void
+i915_gem_retire_handler(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv;
+	struct drm_device *dev;
+
+	dev_priv = container_of(work, drm_i915_private_t,
+				mm.retire_task);
+	dev = dev_priv->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_retire_requests(dev);
+	if (!list_empty(&dev_priv->mm.request_list))
+		mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
+	mutex_unlock(&dev->struct_mutex);
+}
+
 /**
  * Waits for a sequence number to be signaled, and cleans up the
  * request and object lists appropriately for that event.
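The retire path above splits into a timer callback plus a work handler because
timer callbacks run in atomic context, where dev->struct_mutex cannot be taken;
the callback may only punt to process context. A self-contained sketch of this
timer-to-workqueue handoff, using hypothetical my_* names rather than the
driver's, might look like:

/* Illustrative sketch only; my_dev and my_retire_* are not part of the patch. */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list retire_timer;
	struct work_struct retire_task;
	struct mutex lock;
};

/* Timer context is atomic: no sleeping, so no mutex_lock() here.
 * All the callback does is schedule the work item. */
static void my_retire_timeout(unsigned long data)
{
	struct my_dev *md = (struct my_dev *) data;

	schedule_work(&md->retire_task);
}

/* The work handler runs in process context, where taking the mutex is legal. */
static void my_retire_handler(struct work_struct *work)
{
	struct my_dev *md = container_of(work, struct my_dev, retire_task);

	mutex_lock(&md->lock);
	/* ... retire finished requests here ... */
	mod_timer(&md->retire_timer, jiffies + HZ);	/* re-arm: 1 second */
	mutex_unlock(&md->lock);
}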
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index c0ddeae..f6465bf 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -1078,6 +1078,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
+	dev_priv->mm.retire_timer.function = i915_gem_retire_timeout;
+	dev_priv->mm.retire_timer.data = (unsigned long) dev;
+	init_timer_deferrable (&dev_priv->mm.retire_timer);
+	INIT_WORK(&dev_priv->mm.retire_task,
+		  i915_gem_retire_handler);
 	INIT_WORK(&dev_priv->user_interrupt_task,
 		  i915_user_interrupt_handler);
 	dev_priv->mm.next_gem_seqno = 1;
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 33fb7ca..55af655 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -280,6 +280,16 @@ typedef struct drm_i915_private {
 		 */
 		struct list_head request_list;
 
+		/**
+		 * We leave the user IRQ off as much as possible,
+		 * but this means that requests will finish and never
+		 * be retired once the system goes idle. Set a timer to
+		 * fire periodically while the ring is running. When it
+		 * fires, go retire requests.
+		 */
+		struct timer_list retire_timer;
+		struct work_struct retire_task;
+
 		uint32_t next_gem_seqno;
 	} mm;
 
@@ -463,6 +473,8 @@ int i915_gem_flush_pwrite(struct drm_gem_object *obj,
 			  uint64_t offset, uint64_t size);
 void i915_gem_lastclose(struct drm_device *dev);
 void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_timeout(unsigned long data);
+void i915_gem_retire_handler(struct work_struct *work);
 #endif
 
 #ifdef __linux__

commit a708106c77f74f146722fba35eae772fb554ee9a
Author: Keith Packard <ke...@ke...>
Date:   Fri Jun 6 12:58:41 2008 -0700

    [intel] free the hardware status page at driver_unload

    This goes with the other hardware status page patch.

diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 6e188f0..c0ddeae 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -1103,8 +1103,9 @@ int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->mmio_map)
-		drm_rmmap(dev, dev_priv->mmio_map);
+	i915_free_hardware_status(dev);
+
+	drm_rmmap(dev, dev_priv->mmio_map);
 
 	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
 		 DRM_MEM_DRIVER);
@@ -1113,8 +1114,6 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_fini_chipset_flush_compat(dev);
 #endif
 #endif
-	i915_free_hardware_status(dev);
-
 	return 0;
 }

commit 56a96841d01d112d7d4adfebb572016398551ba8
Author: Keith Packard <ke...@ke...>
Date:   Fri Jun 6 12:57:01 2008 -0700

    [intel-gem] Add explicit throttle ioctl

    Instead of throttling at execbuffer time, have the application ask to
    throttle explicitly. This allows the throttle to happen less often,
    and without holding the DRM lock.
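With this change a client calls the new ioctl once per frame instead of paying
the throttle cost on every execbuffer. A hypothetical userspace caller might
look like the sketch below; the request number mirrors the i915_drm.h
definitions in the diff that follows, and the error handling is illustrative:

/* Hypothetical userspace caller for the new throttle ioctl. */
#include <sys/ioctl.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define DRM_IOCTL_BASE		'd'
#define DRM_COMMAND_BASE	0x40
#define DRM_I915_GEM_THROTTLE	0x18
/* DRM_IO()-style request: no argument structure, matching the DRM_IO
 * definition added in i915_drm.h below. */
#define DRM_IOCTL_I915_GEM_THROTTLE \
	_IO(DRM_IOCTL_BASE, DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)

/* Call once per frame, before building the next batch, so the kernel
 * can stall this client if it has run too far ahead of the GPU. */
static int intel_throttle(int drm_fd)
{
	int ret;

	do {
		ret = ioctl(drm_fd, DRM_IOCTL_I915_GEM_THROTTLE);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1)
		fprintf(stderr, "throttle: %s\n", strerror(errno));
	return ret;
}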
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
index 268411e..14e57b4 100644
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@ -1371,10 +1371,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 #endif
 	i915_kernel_lost_context(dev);
 
-	ret = i915_gem_ring_throttle(dev);
-	if (ret)
-		return ret;
-
 	/* Copy in the exec list from userland */
 	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
 			       DRM_MEM_DRIVER);
@@ -1628,6 +1624,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	return i915_gem_ring_throttle(dev);
+}
+
 int i915_gem_init_object(struct drm_gem_object *obj)
 {
 	struct drm_i915_gem_object *obj_priv;
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 1ea5c28..6e188f0 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -1189,6 +1189,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 4712ea4..0fa292d 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -181,6 +181,7 @@ typedef struct drm_i915_sarea {
 #define DRM_I915_GEM_PIN	0x15
 #define DRM_I915_GEM_UNPIN	0x16
 #define DRM_I915_GEM_BUSY	0x17
+#define DRM_I915_GEM_THROTTLE	0x18
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -205,6 +206,7 @@ typedef struct drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
 
 /* Asynchronous page flipping:
 */
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index d646177..33fb7ca 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -451,6 +451,8 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
 int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_free_object(struct drm_gem_object *obj);
 int i915_gem_set_domain(struct drm_gem_object *obj,

commit 329e0862255e8ad27e2aa4e3755421a18ea1acc5
Author: Keith Packard <ke...@ke...>
Date:   Thu Jun 5 16:05:35 2008 -0700

    [libdrm/intel] Eliminate extra dri_gem_bo_bucket_entry structure

    Place the buffer reuse links right into the dri_bo_gem object.
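The diff below threads each cache bucket's free list directly through the
dri_bo_gem objects via a next pointer and a tail pointer-to-pointer, so no
separate entry is malloc'd and freed per cached buffer. A standalone sketch of
that head/tail-pointer scheme, with hypothetical my_* types standing in for
the libdrm ones:

/* Illustrative intrusive free list; my_bo/my_bucket are not libdrm API. */
#include <stddef.h>

struct my_bo {
	int		handle;
	struct my_bo	*next;		/* link lives in the object itself */
};

struct my_bucket {
	struct my_bo	*head;		/* oldest cached object */
	struct my_bo	**tail;		/* points at the last ->next (or at head) */
	int		num_entries;
};

static void bucket_init(struct my_bucket *b)
{
	b->head = NULL;
	b->tail = &b->head;		/* empty list: tail points at head */
	b->num_entries = 0;
}

/* O(1) append; no separate entry allocation needed. */
static void bucket_put(struct my_bucket *b, struct my_bo *bo)
{
	bo->next = NULL;
	*b->tail = bo;
	b->tail = &bo->next;
	b->num_entries++;
}

/* O(1) pop from the head, fixing tail back up when the list empties. */
static struct my_bo *bucket_get(struct my_bucket *b)
{
	struct my_bo *bo = b->head;

	if (bo == NULL)
		return NULL;
	b->head = bo->next;
	if (bo->next == NULL)
		b->tail = &b->head;
	b->num_entries--;
	return bo;
}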
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 32e7091..6504ad6 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -57,14 +57,8 @@
 
 typedef struct _dri_bo_gem dri_bo_gem;
 
-struct dri_gem_bo_bucket_entry {
-	dri_bo_gem *bo_gem;
-	struct dri_gem_bo_bucket_entry *next;
-};
-
 struct dri_gem_bo_bucket {
-	struct dri_gem_bo_bucket_entry *head;
-	struct dri_gem_bo_bucket_entry **tail;
+	dri_bo_gem *head, **tail;
 	/**
 	 * Limit on the number of entries in this bucket.
 	 *
@@ -129,6 +123,9 @@ struct _dri_bo_gem {
 	int reloc_count;
 	/** Mapped address for the buffer */
 	void *virtual;
+
+	/** free list */
+	dri_bo_gem *next;
 };
 
 static int
@@ -285,22 +282,19 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
 
 	/* Get a buffer out of the cache if available */
 	if (bucket != NULL && bucket->num_entries > 0) {
-		struct dri_gem_bo_bucket_entry *entry = bucket->head;
 		struct drm_i915_gem_busy busy;
 
-		bo_gem = entry->bo_gem;
+		bo_gem = bucket->head;
 		busy.handle = bo_gem->gem_handle;
 
 		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 		alloc_from_cache = (ret == 0 && busy.busy == 0);
 
 		if (alloc_from_cache) {
-			bucket->head = entry->next;
-			if (entry->next == NULL)
+			bucket->head = bo_gem->next;
+			if (bo_gem->next == NULL)
 				bucket->tail = &bucket->head;
 			bucket->num_entries--;
-
-			free(entry);
 		}
 	}
 
@@ -417,20 +411,15 @@ dri_gem_bo_unreference(dri_bo *bo)
 		     (bucket->max_entries > 0 &&
 		      bucket->num_entries < bucket->max_entries)))
 		{
-			struct dri_gem_bo_bucket_entry *entry;
-
 			bo_gem->name = 0;
 			bo_gem->validate_index = -1;
 			bo_gem->relocs = NULL;
 			bo_gem->reloc_target_bo = NULL;
 			bo_gem->reloc_count = 0;
 
-			entry = calloc(1, sizeof(*entry));
-			entry->bo_gem = bo_gem;
-
-			entry->next = NULL;
-			*bucket->tail = entry;
-			bucket->tail = &entry->next;
+			bo_gem->next = NULL;
+			*bucket->tail = bo_gem;
+			bucket->tail = &bo_gem->next;
 			bucket->num_entries++;
 		} else {
 			struct drm_gem_close close;
@@ -599,19 +588,17 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
 	/* Free any cached buffer objects we were going to reuse */
 	for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
 		struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
-		struct dri_gem_bo_bucket_entry *entry;
+		dri_bo_gem *bo_gem;
 
-		while ((entry = bucket->head) != NULL) {
+		while ((bo_gem = bucket->head) != NULL) {
 			struct drm_gem_close close;
-			dri_bo_gem *bo_gem;
 			int ret;
 
-			bucket->head = entry->next;
-			if (entry->next == NULL)
+			bucket->head = bo_gem->next;
+			if (bo_gem->next == NULL)
 				bucket->tail = &bucket->head;
 			bucket->num_entries--;
 
-			bo_gem = entry->bo_gem;
 			if (bo_gem->mapped)
 				munmap (bo_gem->virtual, bo_gem->bo.size);
 
@@ -624,7 +611,6 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
 			}
 
 			free(bo_gem);
-			free(entry);
 		}
 	}

commit 5a55b48a410bb25666177c0ea8e5711ea2e3c795
Author: Keith Packard <ke...@ke...>
Date:   Thu Jun 5 15:58:55 2008 -0700

    [libdrm/intel] Remove unused intel_validate_entry structure

diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 08cb5d6..32e7091 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -57,11 +57,6 @@
 
 typedef struct _dri_bo_gem dri_bo_gem;
 
-struct intel_validate_entry {
-	dri_bo_gem *bo_gem;
-	struct drm_i915_op_arg bo_arg;
-};
-
 struct dri_gem_bo_bucket_entry {
 	dri_bo_gem *bo_gem;
 	struct dri_gem_bo_bucket_entry *next;
 };

commit a919ff5d5ec2fe716cbf5c593be7cc0705499107
Author: Keith Packard <ke...@ke...>
Date:   Thu Jun 5 15:58:09 2008 -0700

    [libdrm/intel] Reuse entire dri_bo_gem structure
    The code was discarding the dri_bo_gem structure and saving only the
    kernel handle. This lost the mmap address, causing pain when the next
    buffer user wanted to map the buffer.

diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 20f39b5..08cb5d6 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -55,15 +55,16 @@
 		fprintf(stderr, __VA_ARGS__);		\
 } while (0)
 
+typedef struct _dri_bo_gem dri_bo_gem;
+
 struct intel_validate_entry {
-	dri_bo *bo;
+	dri_bo_gem *bo_gem;
 	struct drm_i915_op_arg bo_arg;
 };
 
 struct dri_gem_bo_bucket_entry {
-	uint32_t gem_handle;
-	uint32_t last_offset;
-	struct dri_gem_bo_bucket_entry *next;
+	dri_bo_gem *bo_gem;
+	struct dri_gem_bo_bucket_entry *next;
 };
 
 struct dri_gem_bo_bucket {
@@ -103,7 +104,7 @@ typedef struct _dri_bufmgr_gem {
 	struct drm_i915_gem_execbuffer exec_arg;
 } dri_bufmgr_gem;
 
-typedef struct _dri_bo_gem {
+struct _dri_bo_gem {
 	dri_bo bo;
 
 	int refcount;
@@ -133,7 +134,7 @@ typedef struct _dri_bo_gem {
 	int reloc_count;
 	/** Mapped address for the buffer */
 	void *virtual;
-} dri_bo_gem;
+};
 
 static int
 logbase2(int n)
@@ -270,24 +271,21 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
 	int ret;
 	struct dri_gem_bo_bucket *bucket;
 	int alloc_from_cache = 0;
-
-	bo_gem = calloc(1, sizeof(*bo_gem));
-	if (!bo_gem)
-		return NULL;
+	unsigned long bo_size;
 
 	/* Round the allocated size up to a power of two number of pages. */
-	bo_gem->bo.size = 1 << logbase2(size);
-	if (bo_gem->bo.size < page_size)
-		bo_gem->bo.size = page_size;
-	bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_gem->bo.size);
+	bo_size = 1 << logbase2(size);
+	if (bo_size < page_size)
+		bo_size = page_size;
+	bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_size);
 
 	/* If we don't have caching at this size, don't actually round the
 	 * allocation up.
	 */
	if (bucket == NULL || bucket->max_entries == 0) {
-		bo_gem->bo.size = size;
-		if (bo_gem->bo.size < page_size)
-			bo_gem->bo.size = page_size;
+		bo_size = size;
+		if (bo_size < page_size)
+			bo_size = page_size;
 	}
 
 	/* Get a buffer out of the cache if available */
@@ -295,7 +293,9 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
 		struct dri_gem_bo_bucket_entry *entry = bucket->head;
 		struct drm_i915_gem_busy busy;
 
-		busy.handle = entry->gem_handle;
+		bo_gem = entry->bo_gem;
+		busy.handle = bo_gem->gem_handle;
+
 		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 		alloc_from_cache = (ret == 0 && busy.busy == 0);
 
@@ -305,8 +305,6 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
 				bucket->tail = &bucket->head;
 			bucket->num_entries--;
 
-			bo_gem->gem_handle = entry->gem_handle;
-			bo_gem->bo.offset = entry->last_offset;
 			free(entry);
 		}
 	}
@@ -314,8 +312,13 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
 	if (!alloc_from_cache) {
 		struct drm_gem_create create;
 
+		bo_gem = calloc(1, sizeof(*bo_gem));
+		if (!bo_gem)
+			return NULL;
+
+		bo_gem->bo.size = bo_size;
 		memset(&create, 0, sizeof(create));
-		create.size = bo_gem->bo.size;
+		create.size = bo_size;
 
 		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CREATE, &create);
 		bo_gem->gem_handle = create.handle;
@@ -323,10 +326,9 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
 			free(bo_gem);
 			return NULL;
 		}
+		bo_gem->bo.bufmgr = bufmgr;
 	}
 
-	bo_gem->bo.virtual = NULL;
-	bo_gem->bo.bufmgr = bufmgr;
 	bo_gem->name = name;
 	bo_gem->refcount = 1;
 	bo_gem->validate_index = -1;
@@ -400,9 +402,6 @@ dri_gem_bo_unreference(dri_bo *bo)
 		struct dri_gem_bo_bucket *bucket;
 		int ret;
 
-		if (bo_gem->mapped)
-			munmap (bo_gem->virtual, bo->size);
-
 		if (bo_gem->relocs != NULL) {
 			int i;
 
@@ -413,6 +412,9 @@ dri_gem_bo_unreference(dri_bo *bo)
 			free(bo_gem->relocs);
 		}
 
+		DBG("bo_unreference final: %d (%s)\n",
+		    bo_gem->gem_handle, bo_gem->name);
+
 		bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
 		/* Put the buffer into our internal cache for reuse if we can.
		 */
		if (bucket != NULL &&
@@ -422,9 +424,14 @@ dri_gem_bo_unreference(dri_bo *bo)
 		{
 			struct dri_gem_bo_bucket_entry *entry;
 
+			bo_gem->name = 0;
+			bo_gem->validate_index = -1;
+			bo_gem->relocs = NULL;
+			bo_gem->reloc_target_bo = NULL;
+			bo_gem->reloc_count = 0;
+
 			entry = calloc(1, sizeof(*entry));
-			entry->gem_handle = bo_gem->gem_handle;
-			entry->last_offset = bo->offset;
+			entry->bo_gem = bo_gem;
 
 			entry->next = NULL;
 			*bucket->tail = entry;
@@ -441,12 +448,9 @@ dri_gem_bo_unreference(dri_bo *bo)
 					"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
 					bo_gem->gem_handle, bo_gem->name,
 					strerror(-ret));
 			}
+			free(bo);
 		}
 
-		DBG("bo_unreference final: %d (%s)\n",
-		    bo_gem->gem_handle, bo_gem->name);
-
-		free(bo);
 		return;
 	}
 }
@@ -604,6 +608,7 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
 
 		while ((entry = bucket->head) != NULL) {
 			struct drm_gem_close close;
+			dri_bo_gem *bo_gem;
 			int ret;
 
 			bucket->head = entry->next;
@@ -611,14 +616,19 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
 				bucket->tail = &bucket->head;
 			bucket->num_entries--;
 
+			bo_gem = entry->bo_gem;
+			if (bo_gem->mapped)
+				munmap (bo_gem->virtual, bo_gem->bo.size);
+
 			/* Close this object */
-			close.handle = entry->gem_handle;
+			close.handle = bo_gem->gem_handle;
 			ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
 			if (ret != 0) {
 				fprintf(stderr,
 					"DRM_IOCTL_GEM_CLOSE failed: %s\n",
 					strerror(-ret));
 			}
 
+			free(bo_gem);
 			free(entry);
 		}
 	}

commit 5f5badb26f761eec87b951ce1b7b3a51a5060c50
Author: Keith Packard <ke...@ke...>
Date:   Thu Jun 5 14:09:57 2008 -0700

    [intel] Allocate hardware status page at driver load time

    I couldn't get the re-allocated HWS to work on my 965GM, so I just
    gave up and made it persist across the lifetime of the driver instead.

diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 37c822c..1ea5c28 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -69,6 +69,44 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 	return -EBUSY;
 }
 
+int i915_init_hardware_status(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	/* Program Hardware Status Page */
+	dev_priv->status_page_dmah =
+		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+	if (!dev_priv->status_page_dmah) {
+		DRM_ERROR("Can not allocate hardware status page\n");
+		return -ENOMEM;
+	}
+	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+	I915_WRITE(0x02080, dev_priv->dma_status_page);
+	DRM_DEBUG("Enabled hardware status page\n");
+	return 0;
+}
+
+void i915_free_hardware_status(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (dev_priv->status_page_dmah) {
+		drm_pci_free(dev, dev_priv->status_page_dmah);
+		dev_priv->status_page_dmah = NULL;
+		/* Need to rewrite hardware status page */
+		I915_WRITE(0x02080, 0x1ffff000);
+	}
+
+	if (dev_priv->status_gfx_addr) {
+		dev_priv->status_gfx_addr = 0;
+		drm_core_ioremapfree(&dev_priv->hws_map, dev);
+		I915_WRITE(0x02080, 0x1ffff000);
+	}
+}
+
 #if I915_RING_VALIDATE
 /**
  * Validate the cached ring tail value
@@ -122,18 +160,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 		dev_priv->ring.map.size = 0;
 	}
 
-	if (dev_priv->status_page_dmah) {
-		drm_pci_free(dev, dev_priv->status_page_dmah);
-		dev_priv->status_page_dmah = NULL;
-		/* Need to rewrite hardware status page */
-		I915_WRITE(0x02080, 0x1ffff000);
-	}
-
-	if (dev_priv->status_gfx_addr) {
-		dev_priv->status_gfx_addr = 0;
-		drm_core_ioremapfree(&dev_priv->hws_map, dev);
-		I915_WRITE(0x02080, 0x1ffff000);
-	}
+	if (I915_NEED_GFX_HWS(dev))
+		i915_free_hardware_status(dev);
 
 	return 0;
 }
@@ -269,24 +297,6 @@ static int i915_initialize(struct drm_device * dev,
 	 */
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
 
-	/* Program Hardware Status Page */
-	if (!I915_NEED_GFX_HWS(dev)) {
-		dev_priv->status_page_dmah =
-			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
-		if (!dev_priv->status_page_dmah) {
-			i915_dma_cleanup(dev);
-			DRM_ERROR("Can not allocate hardware status page\n");
-			return -ENOMEM;
-		}
-		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
-		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
-		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-
-		I915_WRITE(0x02080, dev_priv->dma_status_page);
-	}
-	DRM_DEBUG("Enabled hardware status page\n");
 #ifdef I915_HAVE_BUFFER
 	mutex_init(&dev_priv->cmdbuf_mutex);
 #endif
@@ -1078,6 +1088,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 #endif
 #endif
 
+	/* Init HWS
+	 */
+	if (!I915_NEED_GFX_HWS(dev)) {
+		ret = i915_init_hardware_status(dev);
+		if(ret)
+			return ret;
+	}
+
 	return ret;
 }
 
@@ -1095,6 +1113,8 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_fini_chipset_flush_compat(dev);
 #endif
 #endif
+	i915_free_hardware_status(dev);
+
 	return 0;
 }

commit 84162ccb7dc0286336292ac7f8e80678bfc11804
Author: Keith Packard <ke...@ke...>
Date:   Thu Jun 5 13:49:21 2008 -0700

    Ignore X server provided mmio address

diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index a948834..37c822c 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -214,14 +214,6 @@ static int i915_initialize(struct drm_device * dev,
 		return -EINVAL;
 	}
 
-	if (init->mmio_offset != 0)
-		dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
-	if (!dev_priv->mmio_map) {
-		i915_dma_cleanup(dev);
-		DRM_ERROR("can not find mmio map!\n");
-		return -EINVAL;
-	}
-
 #ifdef I915_HAVE_BUFFER
 	dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
 #endif
@@ -323,11 +315,6 @@ static int i915_dma_resume(struct drm_device * dev)
 		return -EINVAL;
 	}
 
-	if (!dev_priv->mmio_map) {
-		DRM_ERROR("can not find mmio map!\n");
-		return -EINVAL;
-	}
-
 	if (dev_priv->ring.map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");

commit 118baeee1820102177f4f5bb48dd2a1e3d95d21e
Author: Keith Packard <ke...@ke...>
Date:   Thu Jun 5 13:47:41 2008 -0700

    [intel-gem] Dump error status on wait_request failure

diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
index ad73f0a..268411e 100644
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@ -319,6 +319,9 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 					       seqno));
 		i915_user_irq_off(dev_priv);
 	}
+	if (ret)
+		DRM_ERROR ("%s returns %d (awaiting %d at %d)\n",
+			   __func__, ret, seqno, i915_get_gem_seqno(dev));
 
 	/* Directly dispatch request retiring.  While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated