From: <an...@ke...> - 2006-09-07 06:55:30
|
bsd-core/drmP.h | 6 +++++- bsd-core/drm_drv.c | 3 +++ linux-core/drmP.h | 2 ++ linux-core/drm_stub.c | 2 ++ shared-core/i915_dma.c | 8 ++++---- 5 files changed, 16 insertions(+), 5 deletions(-) New commits: diff-tree dddacd7a3a4bd0c453b346cee70d1d36a401e539 (from 55057660f035a03078910d678e5fd9b0cb0b795a) Author: Eric Anholt <er...@an...> Date: Wed Sep 6 23:26:50 2006 -0700 Use the DRM_INIT_WAITQUEUE argument (needed on Linux) to avoid a warning. diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index 6fa3973..9ebb12a 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -170,7 +170,7 @@ MALLOC_DECLARE(M_DRM); #define wait_queue_head_t atomic_t #define DRM_WAKEUP(w) wakeup((void *)w) #define DRM_WAKEUP_INT(w) wakeup(w) -#define DRM_INIT_WAITQUEUE(queue) do {} while (0) +#define DRM_INIT_WAITQUEUE(queue) do {(void)(queue);} while (0) #if defined(__FreeBSD__) && __FreeBSD_version < 502109 #define bus_alloc_resource_any(dev, type, rid, flags) \ diff-tree 55057660f035a03078910d678e5fd9b0cb0b795a (from d5726761858b1ff0fd6e6ee92ec1648fbb958a53) Author: Eric Anholt <er...@an...> Date: Wed Sep 6 23:25:14 2006 -0700 Put the PCI device/vendor id in the drm_device_t. This helps us unbreak FreeBSD DRM from the 965 changes. 
diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index e6c1d06..6fa3973 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -714,6 +714,9 @@ struct drm_device { struct drm_driver_info driver; drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */ + u_int16_t pci_device; /* PCI device id */ + u_int16_t pci_vendor; /* PCI vendor id */ + char *unique; /* Unique identifier: e.g., busid */ int unique_len; /* Length of unique field */ #ifdef __FreeBSD__ diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index 3f53a72..9fb10c5 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -516,6 +516,9 @@ static int drm_load(drm_device_t *dev) dev->pci_slot = pci_get_slot(dev->device); dev->pci_func = pci_get_function(dev->device); + dev->pci_vendor = pci_get_vendor(dev->device); + dev->pci_device = pci_get_device(dev->device); + TAILQ_INIT(&dev->maplist); drm_mem_init(); diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 6046dde..70bf349 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -747,6 +747,8 @@ typedef struct drm_device { drm_agp_head_t *agp; /**< AGP data */ struct pci_dev *pdev; /**< PCI device structure */ + int pci_vendor; /**< PCI vendor id */ + int pci_device; /**< PCI device id */ #ifdef __alpha__ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) struct pci_controler *hose; diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index bdc3655..4708222 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -66,6 +66,8 @@ static int drm_fill_in_dev(drm_device_t mutex_init(&dev->ctxlist_mutex); dev->pdev = pdev; + dev->pci_device = pdev->device; + dev->pci_vendor = pdev->vendor; #ifdef __alpha__ dev->hose = pdev->sysdata; diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index ba8c56e..3863490 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -31,10 +31,10 @@ #include "i915_drm.h" #include "i915_drv.h" -#define IS_I965G(dev) (dev->pdev->device == 0x2972 || \ - dev->pdev->device == 0x2982 || \ 
- dev->pdev->device == 0x2992 || \ - dev->pdev->device == 0x29A2) +#define IS_I965G(dev) (dev->pci_device == 0x2972 || \ + dev->pci_device == 0x2982 || \ + dev->pci_device == 0x2992 || \ + dev->pci_device == 0x29A2) /* Really want an OS-independent resettable timer. Would like to have diff-tree d5726761858b1ff0fd6e6ee92ec1648fbb958a53 (from 9b984b34e99f694e10251e15bc2ec1bc844dcca4) Author: Eric Anholt <er...@an...> Date: Wed Sep 6 23:08:29 2006 -0700 Add a typedef for u64. diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index 074e1d2..e6c1d06 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -270,6 +270,7 @@ extern struct cfdriver drm_cd; #endif typedef unsigned long dma_addr_t; +typedef u_int64_t u64; typedef u_int32_t u32; typedef u_int16_t u16; typedef u_int8_t u8; |
From: <da...@ke...> - 2006-12-01 10:00:52
|
.gitignore | 102 +++++++++++++++++++++++----------------------- bsd-core/drm_bufs.c | 3 + bsd-core/drm_drawable.c | 52 +++++++++++++++++++++++ linux-core/Makefile | 39 +---------------- linux-core/drm.h | 1 linux-core/drm_sarea.h | 1 linux-core/i915_dma.c | 1 linux-core/i915_drm.h | 1 linux-core/i915_drv.h | 1 linux-core/i915_irq.c | 1 linux-core/i915_mem.c | 1 linux-core/linux | 1 linux-core/mach64_dma.c | 1 linux-core/mach64_drm.h | 1 linux-core/mach64_drv.h | 1 linux-core/mach64_irq.c | 1 linux-core/mach64_state.c | 1 linux-core/mga_dma.c | 1 linux-core/mga_drm.h | 1 linux-core/mga_drv.h | 1 linux-core/mga_irq.c | 1 linux-core/mga_state.c | 1 linux-core/mga_ucode.h | 1 linux-core/mga_warp.c | 1 linux-core/nv_drv.h | 1 linux-core/r128_cce.c | 1 linux-core/r128_drm.h | 1 linux-core/r128_drv.h | 1 linux-core/r128_irq.c | 1 linux-core/r128_state.c | 1 linux-core/r300_cmdbuf.c | 1 linux-core/r300_reg.h | 1 linux-core/radeon_cp.c | 1 linux-core/radeon_drm.h | 1 linux-core/radeon_drv.h | 1 linux-core/radeon_irq.c | 1 linux-core/radeon_mem.c | 1 linux-core/radeon_state.c | 1 linux-core/savage_bci.c | 1 linux-core/savage_drm.h | 1 linux-core/savage_drv.h | 1 linux-core/savage_state.c | 1 linux-core/sis_drm.h | 1 linux-core/sis_drv.h | 1 linux-core/tdfx_drv.h | 1 linux-core/via_3d_reg.h | 1 linux-core/via_dma.c | 1 linux-core/via_drm.h | 1 linux-core/via_drv.c | 1 linux-core/via_drv.h | 1 linux-core/via_irq.c | 1 linux-core/via_map.c | 1 linux-core/via_verifier.c | 1 linux-core/via_verifier.h | 1 linux-core/via_video.c | 1 55 files changed, 159 insertions(+), 88 deletions(-) New commits: diff-tree 74a92bbf6ea9b9766a2b827f22605559791569b8 (from a97bb85c2a6852e37ed560e6cbe1242e5f68ad8d) Author: Michel Dänzer <mi...@tu...> Date: Fri Dec 1 11:00:32 2006 +0100 Core build fix for BSD. 
diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c index 33da79e..343ab1e 100644 --- a/bsd-core/drm_bufs.c +++ b/bsd-core/drm_bufs.c @@ -316,6 +316,9 @@ void drm_rmmap(drm_device_t *dev, drm_lo case _DRM_CONSISTENT: drm_pci_free(dev, map->dmah); break; + default: + DRM_ERROR("Bad map type %d\n", map->type); + break; } if (map->bsr != NULL) { diff-tree a97bb85c2a6852e37ed560e6cbe1242e5f68ad8d (from 4a0e61d91013f88ca9555a280e2363bed14aec02) Author: Michel Dänzer <mi...@tu...> Date: Fri Dec 1 10:46:21 2006 +0100 Unshare drm_drawable.c again for now. The current version didn't build on BSD, where the new functionality isn't used yet anyway. Whoever changes that will hopefully be able to make the OSes share this file as well. diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c deleted file mode 120000 index d64bbe1..379e0aa --- a/bsd-core/drm_drawable.c +++ /dev/null @@ -1 +0,0 @@ -../shared-core/drm_drawable.c \ No newline at end of file diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c new file mode 100644 index d64bbe1..379e0aa --- /dev/null +++ b/bsd-core/drm_drawable.c @@ -0,0 +1,51 @@ +/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*- + * Created: Tue Feb 2 08:37:54 1999 by fa...@va... + */ +/*- + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rickard E. 
(Rik) Faith <fa...@va...> + * Gareth Hughes <ga...@va...> + * + */ + +#include "drmP.h" + +int drm_adddraw(DRM_IOCTL_ARGS) +{ + drm_draw_t draw; + + draw.handle = 0; /* NOOP */ + DRM_DEBUG("%d\n", draw.handle); + + DRM_COPY_TO_USER_IOCTL( (drm_draw_t *)data, draw, sizeof(draw) ); + + return 0; +} + +int drm_rmdraw(DRM_IOCTL_ARGS) +{ + return 0; /* NOOP */ +} diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c deleted file mode 120000 index d64bbe1..0817e32 --- a/linux-core/drm_drawable.c +++ /dev/null @@ -1 +0,0 @@ -../shared-core/drm_drawable.c \ No newline at end of file diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c new file mode 100644 index d64bbe1..0817e32 --- /dev/null +++ b/linux-core/drm_drawable.c @@ -0,0 +1,330 @@ +/** + * \file drm_drawable.c + * IOCTLs for drawables + * + * \author Rickard E. (Rik) Faith <fa...@va...> + * \author Gareth Hughes <ga...@va...> + * \author Michel Dänzer <mi...@tu...> + */ + +/* + * Created: Tue Feb 2 08:37:54 1999 by fa...@va... + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" + +/** + * Allocate drawable ID and memory to store information about it. + */ +int drm_adddraw(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + unsigned long irqflags; + int i, j; + u32 *bitfield = dev->drw_bitfield; + unsigned int bitfield_length = dev->drw_bitfield_length; + drm_drawable_info_t **info = dev->drw_info; + unsigned int info_length = dev->drw_info_length; + drm_draw_t draw; + + for (i = 0, j = 0; i < bitfield_length; i++) { + if (bitfield[i] == ~0) + continue; + + for (; j < 8 * sizeof(*bitfield); j++) + if (!(bitfield[i] & (1 << j))) + goto done; + } +done: + + if (i == bitfield_length) { + bitfield_length++; + + bitfield = drm_alloc(bitfield_length * sizeof(*bitfield), + DRM_MEM_BUFS); + + if (!bitfield) { + DRM_ERROR("Failed to allocate new drawable bitfield\n"); + return DRM_ERR(ENOMEM); + } + + if (8 * sizeof(*bitfield) * bitfield_length > info_length) { + info_length += 8 * sizeof(*bitfield); + + info = drm_alloc(info_length * sizeof(*info), + DRM_MEM_BUFS); + + if (!info) { + DRM_ERROR("Failed to allocate new drawable info" + " array\n"); + + drm_free(bitfield, + bitfield_length * sizeof(*bitfield), + DRM_MEM_BUFS); + return DRM_ERR(ENOMEM); + } + } + + bitfield[i] = 0; + } + + draw.handle = i * 8 * sizeof(*bitfield) + j + 1; + DRM_DEBUG("%d\n", draw.handle); + + spin_lock_irqsave(&dev->drw_lock, irqflags); + + bitfield[i] |= 1 << j; + info[draw.handle - 1] = NULL; + + if (bitfield != dev->drw_bitfield) { + memcpy(bitfield, dev->drw_bitfield, 
dev->drw_bitfield_length * + sizeof(*bitfield)); + drm_free(dev->drw_bitfield, sizeof(*bitfield) * + dev->drw_bitfield_length, DRM_MEM_BUFS); + dev->drw_bitfield = bitfield; + dev->drw_bitfield_length = bitfield_length; + } + + if (info != dev->drw_info) { + memcpy(info, dev->drw_info, dev->drw_info_length * + sizeof(*info)); + drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length, + DRM_MEM_BUFS); + dev->drw_info = info; + dev->drw_info_length = info_length; + } + + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + + DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw)); + + return 0; +} + +/** + * Free drawable ID and memory to store information about it. + */ +int drm_rmdraw(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_draw_t draw; + int id, idx; + unsigned int shift; + unsigned long irqflags; + u32 *bitfield = dev->drw_bitfield; + unsigned int bitfield_length = dev->drw_bitfield_length; + drm_drawable_info_t **info = dev->drw_info; + unsigned int info_length = dev->drw_info_length; + + DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, + sizeof(draw)); + + id = draw.handle - 1; + idx = id / (8 * sizeof(*bitfield)); + shift = id % (8 * sizeof(*bitfield)); + + if (idx < 0 || idx >= bitfield_length || + !(bitfield[idx] & (1 << shift))) { + DRM_DEBUG("No such drawable %d\n", draw.handle); + return 0; + } + + spin_lock_irqsave(&dev->drw_lock, irqflags); + + bitfield[idx] &= ~(1 << shift); + + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + + if (info[id]) { + drm_free(info[id]->rects, info[id]->num_rects * + sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + drm_free(info[id], sizeof(**info), DRM_MEM_BUFS); + } + + /* Can we shrink the arrays? 
*/ + if (idx == bitfield_length - 1) { + while (idx >= 0 && !bitfield[idx]) + --idx; + + bitfield_length = idx + 1; + + if (idx != id / (8 * sizeof(*bitfield))) + bitfield = drm_alloc(bitfield_length * + sizeof(*bitfield), DRM_MEM_BUFS); + + if (!bitfield && bitfield_length) { + bitfield = dev->drw_bitfield; + bitfield_length = dev->drw_bitfield_length; + } + } + + if (bitfield != dev->drw_bitfield) { + info_length = 8 * sizeof(*bitfield) * bitfield_length; + + info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS); + + if (!info && info_length) { + info = dev->drw_info; + info_length = dev->drw_info_length; + } + + spin_lock_irqsave(&dev->drw_lock, irqflags); + + memcpy(bitfield, dev->drw_bitfield, bitfield_length * + sizeof(*bitfield)); + drm_free(dev->drw_bitfield, sizeof(*bitfield) * + dev->drw_bitfield_length, DRM_MEM_BUFS); + dev->drw_bitfield = bitfield; + dev->drw_bitfield_length = bitfield_length; + + if (info != dev->drw_info) { + memcpy(info, dev->drw_info, info_length * + sizeof(*info)); + drm_free(dev->drw_info, sizeof(*info) * + dev->drw_info_length, DRM_MEM_BUFS); + dev->drw_info = info; + dev->drw_info_length = info_length; + } + + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + } + + DRM_DEBUG("%d\n", draw.handle); + return 0; +} + +int drm_update_drawable_info(DRM_IOCTL_ARGS) { + DRM_DEVICE; + drm_update_draw_t update; + unsigned int id, idx, shift, bitfield_length = dev->drw_bitfield_length; + u32 *bitfield = dev->drw_bitfield; + unsigned long irqflags; + drm_drawable_info_t *info; + drm_clip_rect_t *rects; + int err; + + DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data, + sizeof(update)); + + id = update.handle - 1; + idx = id / (8 * sizeof(*bitfield)); + shift = id % (8 * sizeof(*bitfield)); + + if (idx < 0 || idx >= bitfield_length || + !(bitfield[idx] & (1 << shift))) { + DRM_ERROR("No such drawable %d\n", update.handle); + return DRM_ERR(EINVAL); + } + + info = dev->drw_info[id]; + + if (!info) { + info = 
drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS); + + if (!info) { + DRM_ERROR("Failed to allocate drawable info memory\n"); + return DRM_ERR(ENOMEM); + } + } + + switch (update.type) { + case DRM_DRAWABLE_CLIPRECTS: + if (update.num != info->num_rects) { + rects = drm_alloc(update.num * sizeof(drm_clip_rect_t), + DRM_MEM_BUFS); + } else + rects = info->rects; + + if (update.num && !rects) { + DRM_ERROR("Failed to allocate cliprect memory\n"); + err = DRM_ERR(ENOMEM); + goto error; + } + + if (update.num && DRM_COPY_FROM_USER(rects, + (drm_clip_rect_t __user *) + (unsigned long)update.data, + update.num * + sizeof(*rects))) { + DRM_ERROR("Failed to copy cliprects from userspace\n"); + err = DRM_ERR(EFAULT); + goto error; + } + + spin_lock_irqsave(&dev->drw_lock, irqflags); + + if (rects != info->rects) { + drm_free(info->rects, info->num_rects * + sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + } + + info->rects = rects; + info->num_rects = update.num; + dev->drw_info[id] = info; + + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + + DRM_DEBUG("Updated %d cliprects for drawable %d\n", + info->num_rects, id); + break; + default: + DRM_ERROR("Invalid update type %d\n", update.type); + return DRM_ERR(EINVAL); + } + + return 0; + +error: + if (!dev->drw_info[id]) + drm_free(info, sizeof(*info), DRM_MEM_BUFS); + else if (rects != dev->drw_info[id]->rects) + drm_free(rects, update.num * + sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + + return err; +} + +/** + * Caller must hold the drawable spinlock! 
+ */ +drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { + u32 *bitfield = dev->drw_bitfield; + unsigned int idx, shift; + + id--; + idx = id / (8 * sizeof(*bitfield)); + shift = id % (8 * sizeof(*bitfield)); + + if (idx < 0 || idx >= dev->drw_bitfield_length || + !(bitfield[idx] & (1 << shift))) { + DRM_DEBUG("No such drawable %d\n", id); + return NULL; + } + + return dev->drw_info[id]; +} +EXPORT_SYMBOL(drm_get_drawable_info); diff --git a/shared-core/drm_drawable.c b/shared-core/drm_drawable.c deleted file mode 100644 index 0817e32..0000000 --- a/shared-core/drm_drawable.c +++ /dev/null @@ -1,330 +0,0 @@ -/** - * \file drm_drawable.c - * IOCTLs for drawables - * - * \author Rickard E. (Rik) Faith <fa...@va...> - * \author Gareth Hughes <ga...@va...> - * \author Michel Dänzer <mi...@tu...> - */ - -/* - * Created: Tue Feb 2 08:37:54 1999 by fa...@va... - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include "drmP.h" - -/** - * Allocate drawable ID and memory to store information about it. - */ -int drm_adddraw(DRM_IOCTL_ARGS) -{ - DRM_DEVICE; - unsigned long irqflags; - int i, j; - u32 *bitfield = dev->drw_bitfield; - unsigned int bitfield_length = dev->drw_bitfield_length; - drm_drawable_info_t **info = dev->drw_info; - unsigned int info_length = dev->drw_info_length; - drm_draw_t draw; - - for (i = 0, j = 0; i < bitfield_length; i++) { - if (bitfield[i] == ~0) - continue; - - for (; j < 8 * sizeof(*bitfield); j++) - if (!(bitfield[i] & (1 << j))) - goto done; - } -done: - - if (i == bitfield_length) { - bitfield_length++; - - bitfield = drm_alloc(bitfield_length * sizeof(*bitfield), - DRM_MEM_BUFS); - - if (!bitfield) { - DRM_ERROR("Failed to allocate new drawable bitfield\n"); - return DRM_ERR(ENOMEM); - } - - if (8 * sizeof(*bitfield) * bitfield_length > info_length) { - info_length += 8 * sizeof(*bitfield); - - info = drm_alloc(info_length * sizeof(*info), - DRM_MEM_BUFS); - - if (!info) { - DRM_ERROR("Failed to allocate new drawable info" - " array\n"); - - drm_free(bitfield, - bitfield_length * sizeof(*bitfield), - DRM_MEM_BUFS); - return DRM_ERR(ENOMEM); - } - } - - bitfield[i] = 0; - } - - draw.handle = i * 8 * sizeof(*bitfield) + j + 1; - DRM_DEBUG("%d\n", draw.handle); - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - bitfield[i] |= 1 << j; - info[draw.handle - 1] = NULL; - - if (bitfield != dev->drw_bitfield) { - memcpy(bitfield, dev->drw_bitfield, dev->drw_bitfield_length * - sizeof(*bitfield)); - drm_free(dev->drw_bitfield, sizeof(*bitfield) * - dev->drw_bitfield_length, DRM_MEM_BUFS); - dev->drw_bitfield = bitfield; - dev->drw_bitfield_length = 
bitfield_length; - } - - if (info != dev->drw_info) { - memcpy(info, dev->drw_info, dev->drw_info_length * - sizeof(*info)); - drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length, - DRM_MEM_BUFS); - dev->drw_info = info; - dev->drw_info_length = info_length; - } - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw)); - - return 0; -} - -/** - * Free drawable ID and memory to store information about it. - */ -int drm_rmdraw(DRM_IOCTL_ARGS) -{ - DRM_DEVICE; - drm_draw_t draw; - int id, idx; - unsigned int shift; - unsigned long irqflags; - u32 *bitfield = dev->drw_bitfield; - unsigned int bitfield_length = dev->drw_bitfield_length; - drm_drawable_info_t **info = dev->drw_info; - unsigned int info_length = dev->drw_info_length; - - DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, - sizeof(draw)); - - id = draw.handle - 1; - idx = id / (8 * sizeof(*bitfield)); - shift = id % (8 * sizeof(*bitfield)); - - if (idx < 0 || idx >= bitfield_length || - !(bitfield[idx] & (1 << shift))) { - DRM_DEBUG("No such drawable %d\n", draw.handle); - return 0; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - bitfield[idx] &= ~(1 << shift); - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - if (info[id]) { - drm_free(info[id]->rects, info[id]->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - drm_free(info[id], sizeof(**info), DRM_MEM_BUFS); - } - - /* Can we shrink the arrays? 
*/ - if (idx == bitfield_length - 1) { - while (idx >= 0 && !bitfield[idx]) - --idx; - - bitfield_length = idx + 1; - - if (idx != id / (8 * sizeof(*bitfield))) - bitfield = drm_alloc(bitfield_length * - sizeof(*bitfield), DRM_MEM_BUFS); - - if (!bitfield && bitfield_length) { - bitfield = dev->drw_bitfield; - bitfield_length = dev->drw_bitfield_length; - } - } - - if (bitfield != dev->drw_bitfield) { - info_length = 8 * sizeof(*bitfield) * bitfield_length; - - info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS); - - if (!info && info_length) { - info = dev->drw_info; - info_length = dev->drw_info_length; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - memcpy(bitfield, dev->drw_bitfield, bitfield_length * - sizeof(*bitfield)); - drm_free(dev->drw_bitfield, sizeof(*bitfield) * - dev->drw_bitfield_length, DRM_MEM_BUFS); - dev->drw_bitfield = bitfield; - dev->drw_bitfield_length = bitfield_length; - - if (info != dev->drw_info) { - memcpy(info, dev->drw_info, info_length * - sizeof(*info)); - drm_free(dev->drw_info, sizeof(*info) * - dev->drw_info_length, DRM_MEM_BUFS); - dev->drw_info = info; - dev->drw_info_length = info_length; - } - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - } - - DRM_DEBUG("%d\n", draw.handle); - return 0; -} - -int drm_update_drawable_info(DRM_IOCTL_ARGS) { - DRM_DEVICE; - drm_update_draw_t update; - unsigned int id, idx, shift, bitfield_length = dev->drw_bitfield_length; - u32 *bitfield = dev->drw_bitfield; - unsigned long irqflags; - drm_drawable_info_t *info; - drm_clip_rect_t *rects; - int err; - - DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data, - sizeof(update)); - - id = update.handle - 1; - idx = id / (8 * sizeof(*bitfield)); - shift = id % (8 * sizeof(*bitfield)); - - if (idx < 0 || idx >= bitfield_length || - !(bitfield[idx] & (1 << shift))) { - DRM_ERROR("No such drawable %d\n", update.handle); - return DRM_ERR(EINVAL); - } - - info = dev->drw_info[id]; - - if (!info) { - info = 
drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS); - - if (!info) { - DRM_ERROR("Failed to allocate drawable info memory\n"); - return DRM_ERR(ENOMEM); - } - } - - switch (update.type) { - case DRM_DRAWABLE_CLIPRECTS: - if (update.num != info->num_rects) { - rects = drm_alloc(update.num * sizeof(drm_clip_rect_t), - DRM_MEM_BUFS); - } else - rects = info->rects; - - if (update.num && !rects) { - DRM_ERROR("Failed to allocate cliprect memory\n"); - err = DRM_ERR(ENOMEM); - goto error; - } - - if (update.num && DRM_COPY_FROM_USER(rects, - (drm_clip_rect_t __user *) - (unsigned long)update.data, - update.num * - sizeof(*rects))) { - DRM_ERROR("Failed to copy cliprects from userspace\n"); - err = DRM_ERR(EFAULT); - goto error; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - if (rects != info->rects) { - drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - } - - info->rects = rects; - info->num_rects = update.num; - dev->drw_info[id] = info; - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - DRM_DEBUG("Updated %d cliprects for drawable %d\n", - info->num_rects, id); - break; - default: - DRM_ERROR("Invalid update type %d\n", update.type); - return DRM_ERR(EINVAL); - } - - return 0; - -error: - if (!dev->drw_info[id]) - drm_free(info, sizeof(*info), DRM_MEM_BUFS); - else if (rects != dev->drw_info[id]->rects) - drm_free(rects, update.num * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - - return err; -} - -/** - * Caller must hold the drawable spinlock! 
- */ -drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { - u32 *bitfield = dev->drw_bitfield; - unsigned int idx, shift; - - id--; - idx = id / (8 * sizeof(*bitfield)); - shift = id % (8 * sizeof(*bitfield)); - - if (idx < 0 || idx >= dev->drw_bitfield_length || - !(bitfield[idx] & (1 << shift))) { - DRM_DEBUG("No such drawable %d\n", id); - return NULL; - } - - return dev->drw_info[id]; -} -EXPORT_SYMBOL(drm_get_drawable_info); diff-tree 4a0e61d91013f88ca9555a280e2363bed14aec02 (from ddcb994c3eac97e153922e2a4c71384404f68597) Author: Michel Dänzer <mi...@tu...> Date: Sat Oct 21 16:14:20 2006 +0200 Track linux-core symlinks in git. diff --git a/.gitignore b/.gitignore index 333bbad..4386f40 100644 --- a/.gitignore +++ b/.gitignore @@ -1,54 +1,54 @@ -*-core/linux -*-core/drm.h -*-core/drm_sarea.h -*-core/i915_dma.c -*-core/i915_drm.h -*-core/i915_drv.h -*-core/i915_irq.c -*-core/i915_mem.c -*-core/mach64_dma.c -*-core/mach64_drm.h -*-core/mach64_drv.h -*-core/mach64_irq.c -*-core/mach64_state.c -*-core/mga_dma.c -*-core/mga_drm.h -*-core/mga_drv.h -*-core/mga_irq.c -*-core/mga_state.c -*-core/mga_ucode.h -*-core/mga_warp.c -*-core/nv_drv.h -*-core/r128_cce.c -*-core/r128_drm.h -*-core/r128_drv.h -*-core/r128_irq.c -*-core/r128_state.c -*-core/r300_cmdbuf.c -*-core/r300_reg.h -*-core/radeon_cp.c -*-core/radeon_drm.h -*-core/radeon_drv.h -*-core/radeon_irq.c -*-core/radeon_mem.c -*-core/radeon_state.c -*-core/savage_bci.c -*-core/savage_drm.h -*-core/savage_drv.h -*-core/savage_state.c -*-core/sis_drm.h -*-core/sis_drv.h -*-core/tdfx_drv.h -*-core/via_3d_reg.h -*-core/via_dma.c -*-core/via_drm.h -*-core/via_drv.c -*-core/via_drv.h -*-core/via_irq.c -*-core/via_map.c -*-core/via_verifier.c -*-core/via_verifier.h -*-core/via_video.c +bsd-core/linux +bsd-core/drm.h +bsd-core/drm_sarea.h +bsd-core/i915_dma.c +bsd-core/i915_drm.h +bsd-core/i915_drv.h +bsd-core/i915_irq.c +bsd-core/i915_mem.c +bsd-core/mach64_dma.c +bsd-core/mach64_drm.h 
+bsd-core/mach64_drv.h +bsd-core/mach64_irq.c +bsd-core/mach64_state.c +bsd-core/mga_dma.c +bsd-core/mga_drm.h +bsd-core/mga_drv.h +bsd-core/mga_irq.c +bsd-core/mga_state.c +bsd-core/mga_ucode.h +bsd-core/mga_warp.c +bsd-core/nv_drv.h +bsd-core/r128_cce.c +bsd-core/r128_drm.h +bsd-core/r128_drv.h +bsd-core/r128_irq.c +bsd-core/r128_state.c +bsd-core/r300_cmdbuf.c +bsd-core/r300_reg.h +bsd-core/radeon_cp.c +bsd-core/radeon_drm.h +bsd-core/radeon_drv.h +bsd-core/radeon_irq.c +bsd-core/radeon_mem.c +bsd-core/radeon_state.c +bsd-core/savage_bci.c +bsd-core/savage_drm.h +bsd-core/savage_drv.h +bsd-core/savage_state.c +bsd-core/sis_drm.h +bsd-core/sis_drv.h +bsd-core/tdfx_drv.h +bsd-core/via_3d_reg.h +bsd-core/via_dma.c +bsd-core/via_drm.h +bsd-core/via_drv.c +bsd-core/via_drv.h +bsd-core/via_irq.c +bsd-core/via_map.c +bsd-core/via_verifier.c +bsd-core/via_verifier.h +bsd-core/via_video.c *.flags *.ko *.ko.cmd diff --git a/linux-core/Makefile b/linux-core/Makefile index 3aecec4..b4cff78 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -75,45 +75,26 @@ DRM_MODULES ?= $(MODULE_LIST) # These definitions are for handling dependencies in the out of kernel build. 
-DRMSHARED = drm.h drm_sarea.h drm_drawable.c DRMHEADERS = drmP.h drm_compat.h drm_os_linux.h drm.h drm_sarea.h COREHEADERS = drm_core.h drm_sman.h drm_hashtab.h TDFXHEADERS = tdfx_drv.h $(DRMHEADERS) -TDFXSHARED = tdfx_drv.h R128HEADERS = r128_drv.h r128_drm.h $(DRMHEADERS) -R128SHARED = r128_drv.h r128_drm.h r128_cce.c r128_state.c r128_irq.c RADEONHEADERS = radeon_drv.h radeon_drm.h r300_reg.h $(DRMHEADERS) -RADEONSHARED = radeon_drv.h radeon_drm.h radeon_cp.c radeon_irq.c \ - radeon_mem.c radeon_state.c r300_cmdbuf.c r300_reg.h MGAHEADERS = mga_drv.h mga_drm.h mga_ucode.h $(DRMHEADERS) -MGASHARED = mga_dma.c mga_drm.h mga_drv.h mga_irq.c mga_state.c \ - mga_ucode.h mga_warp.c I810HEADERS = i810_drv.h i810_drm.h $(DRMHEADERS) I830HEADERS = i830_drv.h i830_drm.h $(DRMHEADERS) I915HEADERS = i915_drv.h i915_drm.h $(DRMHEADERS) -I915SHARED = i915_drv.h i915_drm.h i915_irq.c i915_mem.c i915_dma.c SISHEADERS= sis_drv.h sis_drm.h drm_hashtab.h drm_sman.h $(DRMHEADERS) -SISSHARED= sis_drv.h sis_drm.h SAVAGEHEADERS= savage_drv.h savage_drm.h $(DRMHEADERS) -SAVAGESHARED= savage_drv.h savage_drm.h savage_bci.c savage_state.c VIAHEADERS = via_drm.h via_drv.h via_3d_reg.h via_verifier.h $(DRMHEADERS) -VIASHARED = via_drm.h via_drv.h via_3d_reg.h via_drv.c via_irq.c via_map.c \ - via_dma.c via_verifier.c via_verifier.h via_video.c MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS) -MACH64SHARED = mach64_drv.h mach64_drm.h mach64_dma.c \ - mach64_irq.c mach64_state.c NVHEADERS = nv_drv.h $(DRMHEADERS) -NVSHARED = nv_drv.h FFBHEADERS = ffb_drv.h $(DRMHEADERS) -SHAREDSRC = $(DRMSHARED) $(MGASHARED) $(R128SHARED) $(RADEONSHARED) \ - $(SISSHARED) $(TDFXSHARED) $(VIASHARED) $(MACH64SHARED) \ - $(I915SHARED) $(SAVAGESHARED) $(NVSHARED) - PROGS = dristat drmstat -CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c linux drm_pciids.h .tmp_versions +CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c drm_pciids.h .tmp_versions # VERSION is not 
defined from the initial invocation. It is defined when # this Makefile is invoked from the kernel's root Makefile. @@ -226,27 +207,13 @@ endif SHAREDDIR := ../shared-core -HASSHARED := $(shell if [ -d $(SHAREDDIR) ]; then echo y; fi) - -ifeq ($(HASSHARED),y) -includes:: $(SHAREDSRC) drm_pciids.h +ifeq ($(shell if [ -d $(SHAREDDIR) ]; then echo y; fi),y) +includes:: drm_pciids.h drm_pciids.h: $(SHAREDDIR)/drm_pciids.txt sh ../scripts/create_linux_pci_lists.sh < $(SHAREDDIR)/drm_pciids.txt - -$(SHAREDSRC): - @if [ -r $(SHAREDDIR)/$@ ]; then \ - (rm -f $@; set -x; ln -s $(SHAREDDIR)/$@ $@); fi - -CLEANFILES += $(SHAREDSRC) endif -includes:: linux - -linux: - rm -f linux - ln -s . linux - clean cleandir: rm -rf $(CLEANFILES) diff --git a/linux-core/drm.h b/linux-core/drm.h new file mode 120000 index 0000000..2963669 --- /dev/null +++ b/linux-core/drm.h @@ -0,0 +1 @@ +../shared-core/drm.h \ No newline at end of file diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c new file mode 120000 index 0000000..d64bbe1 --- /dev/null +++ b/linux-core/drm_drawable.c @@ -0,0 +1 @@ +../shared-core/drm_drawable.c \ No newline at end of file diff --git a/linux-core/drm_sarea.h b/linux-core/drm_sarea.h new file mode 120000 index 0000000..fd428f4 --- /dev/null +++ b/linux-core/drm_sarea.h @@ -0,0 +1 @@ +../shared-core/drm_sarea.h \ No newline at end of file diff --git a/linux-core/i915_dma.c b/linux-core/i915_dma.c new file mode 120000 index 0000000..c61d967 --- /dev/null +++ b/linux-core/i915_dma.c @@ -0,0 +1 @@ +../shared-core/i915_dma.c \ No newline at end of file diff --git a/linux-core/i915_drm.h b/linux-core/i915_drm.h new file mode 120000 index 0000000..ed53f01 --- /dev/null +++ b/linux-core/i915_drm.h @@ -0,0 +1 @@ +../shared-core/i915_drm.h \ No newline at end of file diff --git a/linux-core/i915_drv.h b/linux-core/i915_drv.h new file mode 120000 index 0000000..085558c --- /dev/null +++ b/linux-core/i915_drv.h @@ -0,0 +1 @@ +../shared-core/i915_drv.h \ No 
newline at end of file diff --git a/linux-core/i915_irq.c b/linux-core/i915_irq.c new file mode 120000 index 0000000..2058a2e --- /dev/null +++ b/linux-core/i915_irq.c @@ -0,0 +1 @@ +../shared-core/i915_irq.c \ No newline at end of file diff --git a/linux-core/i915_mem.c b/linux-core/i915_mem.c new file mode 120000 index 0000000..e8e5655 --- /dev/null +++ b/linux-core/i915_mem.c @@ -0,0 +1 @@ +../shared-core/i915_mem.c \ No newline at end of file diff --git a/linux-core/linux b/linux-core/linux new file mode 120000 index 0000000..945c9b4 --- /dev/null +++ b/linux-core/linux @@ -0,0 +1 @@ +. \ No newline at end of file diff --git a/linux-core/mach64_dma.c b/linux-core/mach64_dma.c new file mode 120000 index 0000000..e5c2897 --- /dev/null +++ b/linux-core/mach64_dma.c @@ -0,0 +1 @@ +../shared-core/mach64_dma.c \ No newline at end of file diff --git a/linux-core/mach64_drm.h b/linux-core/mach64_drm.h new file mode 120000 index 0000000..136ea93 --- /dev/null +++ b/linux-core/mach64_drm.h @@ -0,0 +1 @@ +../shared-core/mach64_drm.h \ No newline at end of file diff --git a/linux-core/mach64_drv.h b/linux-core/mach64_drv.h new file mode 120000 index 0000000..85222cc --- /dev/null +++ b/linux-core/mach64_drv.h @@ -0,0 +1 @@ +../shared-core/mach64_drv.h \ No newline at end of file diff --git a/linux-core/mach64_irq.c b/linux-core/mach64_irq.c new file mode 120000 index 0000000..a1235d5 --- /dev/null +++ b/linux-core/mach64_irq.c @@ -0,0 +1 @@ +../shared-core/mach64_irq.c \ No newline at end of file diff --git a/linux-core/mach64_state.c b/linux-core/mach64_state.c new file mode 120000 index 0000000..b11f202 --- /dev/null +++ b/linux-core/mach64_state.c @@ -0,0 +1 @@ +../shared-core/mach64_state.c \ No newline at end of file diff --git a/linux-core/mga_dma.c b/linux-core/mga_dma.c new file mode 120000 index 0000000..f290be9 --- /dev/null +++ b/linux-core/mga_dma.c @@ -0,0 +1 @@ +../shared-core/mga_dma.c \ No newline at end of file diff --git a/linux-core/mga_drm.h 
b/linux-core/mga_drm.h new file mode 120000 index 0000000..1c87036 --- /dev/null +++ b/linux-core/mga_drm.h @@ -0,0 +1 @@ +../shared-core/mga_drm.h \ No newline at end of file diff --git a/linux-core/mga_drv.h b/linux-core/mga_drv.h new file mode 120000 index 0000000..cb0c9e1 --- /dev/null +++ b/linux-core/mga_drv.h @@ -0,0 +1 @@ +../shared-core/mga_drv.h \ No newline at end of file diff --git a/linux-core/mga_irq.c b/linux-core/mga_irq.c new file mode 120000 index 0000000..cf521d2 --- /dev/null +++ b/linux-core/mga_irq.c @@ -0,0 +1 @@ +../shared-core/mga_irq.c \ No newline at end of file diff --git a/linux-core/mga_state.c b/linux-core/mga_state.c new file mode 120000 index 0000000..8bda8ba --- /dev/null +++ b/linux-core/mga_state.c @@ -0,0 +1 @@ +../shared-core/mga_state.c \ No newline at end of file diff --git a/linux-core/mga_ucode.h b/linux-core/mga_ucode.h new file mode 120000 index 0000000..728b9ac --- /dev/null +++ b/linux-core/mga_ucode.h @@ -0,0 +1 @@ +../shared-core/mga_ucode.h \ No newline at end of file diff --git a/linux-core/mga_warp.c b/linux-core/mga_warp.c new file mode 120000 index 0000000..d35b325 --- /dev/null +++ b/linux-core/mga_warp.c @@ -0,0 +1 @@ +../shared-core/mga_warp.c \ No newline at end of file diff --git a/linux-core/nv_drv.h b/linux-core/nv_drv.h new file mode 120000 index 0000000..c961780 --- /dev/null +++ b/linux-core/nv_drv.h @@ -0,0 +1 @@ +../shared-core/nv_drv.h \ No newline at end of file diff --git a/linux-core/r128_cce.c b/linux-core/r128_cce.c new file mode 120000 index 0000000..0c1d659 --- /dev/null +++ b/linux-core/r128_cce.c @@ -0,0 +1 @@ +../shared-core/r128_cce.c \ No newline at end of file diff --git a/linux-core/r128_drm.h b/linux-core/r128_drm.h new file mode 120000 index 0000000..363852c --- /dev/null +++ b/linux-core/r128_drm.h @@ -0,0 +1 @@ +../shared-core/r128_drm.h \ No newline at end of file diff --git a/linux-core/r128_drv.h b/linux-core/r128_drv.h new file mode 120000 index 0000000..4f7e822 --- /dev/null 
+++ b/linux-core/r128_drv.h @@ -0,0 +1 @@ +../shared-core/r128_drv.h \ No newline at end of file diff --git a/linux-core/r128_irq.c b/linux-core/r128_irq.c new file mode 120000 index 0000000..66d28b0 --- /dev/null +++ b/linux-core/r128_irq.c @@ -0,0 +1 @@ +../shared-core/r128_irq.c \ No newline at end of file diff --git a/linux-core/r128_state.c b/linux-core/r128_state.c new file mode 120000 index 0000000..e83d84b --- /dev/null +++ b/linux-core/r128_state.c @@ -0,0 +1 @@ +../shared-core/r128_state.c \ No newline at end of file diff --git a/linux-core/r300_cmdbuf.c b/linux-core/r300_cmdbuf.c new file mode 120000 index 0000000..6674d05 --- /dev/null +++ b/linux-core/r300_cmdbuf.c @@ -0,0 +1 @@ +../shared-core/r300_cmdbuf.c \ No newline at end of file diff --git a/linux-core/r300_reg.h b/linux-core/r300_reg.h new file mode 120000 index 0000000..ef54eba --- /dev/null +++ b/linux-core/r300_reg.h @@ -0,0 +1 @@ +../shared-core/r300_reg.h \ No newline at end of file diff --git a/linux-core/radeon_cp.c b/linux-core/radeon_cp.c new file mode 120000 index 0000000..ee86094 --- /dev/null +++ b/linux-core/radeon_cp.c @@ -0,0 +1 @@ +../shared-core/radeon_cp.c \ No newline at end of file diff --git a/linux-core/radeon_drm.h b/linux-core/radeon_drm.h new file mode 120000 index 0000000..54f595a --- /dev/null +++ b/linux-core/radeon_drm.h @@ -0,0 +1 @@ +../shared-core/radeon_drm.h \ No newline at end of file diff --git a/linux-core/radeon_drv.h b/linux-core/radeon_drv.h new file mode 120000 index 0000000..5b415ea --- /dev/null +++ b/linux-core/radeon_drv.h @@ -0,0 +1 @@ +../shared-core/radeon_drv.h \ No newline at end of file diff --git a/linux-core/radeon_irq.c b/linux-core/radeon_irq.c new file mode 120000 index 0000000..2f394a5 --- /dev/null +++ b/linux-core/radeon_irq.c @@ -0,0 +1 @@ +../shared-core/radeon_irq.c \ No newline at end of file diff --git a/linux-core/radeon_mem.c b/linux-core/radeon_mem.c new file mode 120000 index 0000000..8cc2798 --- /dev/null +++ 
b/linux-core/radeon_mem.c @@ -0,0 +1 @@ +../shared-core/radeon_mem.c \ No newline at end of file diff --git a/linux-core/radeon_state.c b/linux-core/radeon_state.c new file mode 120000 index 0000000..ccee876 --- /dev/null +++ b/linux-core/radeon_state.c @@ -0,0 +1 @@ +../shared-core/radeon_state.c \ No newline at end of file diff --git a/linux-core/savage_bci.c b/linux-core/savage_bci.c new file mode 120000 index 0000000..b843671 --- /dev/null +++ b/linux-core/savage_bci.c @@ -0,0 +1 @@ +../shared-core/savage_bci.c \ No newline at end of file diff --git a/linux-core/savage_drm.h b/linux-core/savage_drm.h new file mode 120000 index 0000000..0dab2e3 --- /dev/null +++ b/linux-core/savage_drm.h @@ -0,0 +1 @@ +../shared-core/savage_drm.h \ No newline at end of file diff --git a/linux-core/savage_drv.h b/linux-core/savage_drv.h new file mode 120000 index 0000000..8397009 --- /dev/null +++ b/linux-core/savage_drv.h @@ -0,0 +1 @@ +../shared-core/savage_drv.h \ No newline at end of file diff --git a/linux-core/savage_state.c b/linux-core/savage_state.c new file mode 120000 index 0000000..e55dc5d --- /dev/null +++ b/linux-core/savage_state.c @@ -0,0 +1 @@ +../shared-core/savage_state.c \ No newline at end of file diff --git a/linux-core/sis_drm.h b/linux-core/sis_drm.h new file mode 120000 index 0000000..36c77aa --- /dev/null +++ b/linux-core/sis_drm.h @@ -0,0 +1 @@ +../shared-core/sis_drm.h \ No newline at end of file diff --git a/linux-core/sis_drv.h b/linux-core/sis_drv.h new file mode 120000 index 0000000..3fddfda --- /dev/null +++ b/linux-core/sis_drv.h @@ -0,0 +1 @@ +../shared-core/sis_drv.h \ No newline at end of file diff --git a/linux-core/tdfx_drv.h b/linux-core/tdfx_drv.h new file mode 120000 index 0000000..8df7032 --- /dev/null +++ b/linux-core/tdfx_drv.h @@ -0,0 +1 @@ +../shared-core/tdfx_drv.h \ No newline at end of file diff --git a/linux-core/via_3d_reg.h b/linux-core/via_3d_reg.h new file mode 120000 index 0000000..90d238e --- /dev/null +++ 
b/linux-core/via_3d_reg.h @@ -0,0 +1 @@ +../shared-core/via_3d_reg.h \ No newline at end of file diff --git a/linux-core/via_dma.c b/linux-core/via_dma.c new file mode 120000 index 0000000..1f4d920 --- /dev/null +++ b/linux-core/via_dma.c @@ -0,0 +1 @@ +../shared-core/via_dma.c \ No newline at end of file diff --git a/linux-core/via_drm.h b/linux-core/via_drm.h new file mode 120000 index 0000000..7cd175d --- /dev/null +++ b/linux-core/via_drm.h @@ -0,0 +1 @@ +../shared-core/via_drm.h \ No newline at end of file diff --git a/linux-core/via_drv.c b/linux-core/via_drv.c new file mode 120000 index 0000000..b6ff160 --- /dev/null +++ b/linux-core/via_drv.c @@ -0,0 +1 @@ +../shared-core/via_drv.c \ No newline at end of file diff --git a/linux-core/via_drv.h b/linux-core/via_drv.h new file mode 120000 index 0000000..8954fe8 --- /dev/null +++ b/linux-core/via_drv.h @@ -0,0 +1 @@ +../shared-core/via_drv.h \ No newline at end of file diff --git a/linux-core/via_irq.c b/linux-core/via_irq.c new file mode 120000 index 0000000..f615af8 --- /dev/null +++ b/linux-core/via_irq.c @@ -0,0 +1 @@ +../shared-core/via_irq.c \ No newline at end of file diff --git a/linux-core/via_map.c b/linux-core/via_map.c new file mode 120000 index 0000000..b505663 --- /dev/null +++ b/linux-core/via_map.c @@ -0,0 +1 @@ +../shared-core/via_map.c \ No newline at end of file diff --git a/linux-core/via_verifier.c b/linux-core/via_verifier.c new file mode 120000 index 0000000..00b411b --- /dev/null +++ b/linux-core/via_verifier.c @@ -0,0 +1 @@ +../shared-core/via_verifier.c \ No newline at end of file diff --git a/linux-core/via_verifier.h b/linux-core/via_verifier.h new file mode 120000 index 0000000..62d3e28 --- /dev/null +++ b/linux-core/via_verifier.h @@ -0,0 +1 @@ +../shared-core/via_verifier.h \ No newline at end of file diff --git a/linux-core/via_video.c b/linux-core/via_video.c new file mode 120000 index 0000000..a6d2794 --- /dev/null +++ b/linux-core/via_video.c @@ -0,0 +1 @@ 
+../shared-core/via_video.c \ No newline at end of file |
From: <ai...@ke...> - 2006-12-19 10:51:48
|
linux-core/i810_dma.c | 8 +------- linux-core/i830_dma.c | 8 +------- scripts/create_lk_drm.sh | 6 ++++++ shared-core/i915_dma.c | 9 ++++----- 4 files changed, 12 insertions(+), 19 deletions(-) New commits: diff-tree bc4c83573111361e9817d6a7414bd84f73ca7cce (from 7458909beae274198ca2a29b510a808ce2feca0a) Author: Dave Airlie <ai...@li...> Date: Tue Dec 19 21:51:30 2006 +1100 remove do munmap 4 args diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index bdbb31f..3126796 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -39,12 +39,6 @@ #include "i810_drm.h" #include "i810_drv.h" -#ifdef DO_MUNMAP_4_ARGS -#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) -#else -#define DO_MUNMAP(m, a, l) do_munmap(m, a, l) -#endif - #define I810_BUF_FREE 2 #define I810_BUF_CLIENT 1 #define I810_BUF_HARDWARE 0 @@ -186,7 +180,7 @@ static int i810_unmap_buffer(drm_buf_t * return -EINVAL; down_write(¤t->mm->mmap_sem); - retcode = DO_MUNMAP(current->mm, + retcode = do_munmap(current->mm, (unsigned long)buf_priv->virtual, (size_t) buf->total); up_write(¤t->mm->mmap_sem); diff --git a/linux-core/i830_dma.c b/linux-core/i830_dma.c index 4526ccf..a1b7079 100644 --- a/linux-core/i830_dma.c +++ b/linux-core/i830_dma.c @@ -41,12 +41,6 @@ #include "i830_drm.h" #include "i830_drv.h" -#ifdef DO_MUNMAP_4_ARGS -#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) -#else -#define DO_MUNMAP(m, a, l) do_munmap(m, a, l) -#endif - #define I830_BUF_FREE 2 #define I830_BUF_CLIENT 1 #define I830_BUF_HARDWARE 0 @@ -174,7 +168,7 @@ static int i830_unmap_buffer(drm_buf_t * return -EINVAL; down_write(¤t->mm->mmap_sem); - retcode = DO_MUNMAP(current->mm, + retcode = do_munmap(current->mm, (unsigned long)buf_priv->virtual, (size_t) buf->total); up_write(¤t->mm->mmap_sem); diff-tree 7458909beae274198ca2a29b510a808ce2feca0a (from 98799f862e58e02ef07f0d0db0863e7c91c7e5b0) Author: Dave Airlie <ai...@li...> Date: Tue Dec 19 21:48:18 2006 +1100 fixup i915 return values from kernel diff --git 
a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 60e3e94..3373f1b 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -263,7 +263,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS) retcode = i915_dma_resume(dev); break; default: - retcode = -EINVAL; + retcode = DRM_ERR(EINVAL); break; } @@ -360,10 +360,9 @@ static int i915_emit_cmds(drm_device_t * for (i = 0; i < dwords;) { int cmd, sz; - if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) { - + if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) return DRM_ERR(EINVAL); - } + if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) return DRM_ERR(EINVAL); @@ -395,7 +394,7 @@ static int i915_emit_box(drm_device_t * RING_LOCALS; if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { - return EFAULT; + return DRM_ERR(EFAULT); } if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { diff-tree 98799f862e58e02ef07f0d0db0863e7c91c7e5b0 (from 6333bfdb58b300494b2ec6f7b5a3ea5b392a210a) Author: Dave Airlie <ai...@li...> Date: Tue Dec 19 21:48:06 2006 +1100 fixup i915 defines in create script diff --git a/scripts/create_lk_drm.sh b/scripts/create_lk_drm.sh index 2f00d7a..e2cef8d 100755 --- a/scripts/create_lk_drm.sh +++ b/scripts/create_lk_drm.sh @@ -40,4 +40,10 @@ do unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DSIS_HAVE_CORE_MM $i > $i.tmp mv $i.tmp $i done + +for i in i915*.[ch] +do +unifdef -D__linux__ -DI915_HAVE_FENCE -DI915_HAVE_BUFFER $i > $i.tmp +mv $i.tmp $i +done cd - |
From: <da...@ke...> - 2007-01-02 09:06:03
|
.gitignore | 2 ++ linux-core/.gitignore | 1 + shared-core/i915_irq.c | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) New commits: diff-tree 4fe2858f53c6ea542cd81961ebdad118acfc8f32 (from 176b62991ad59e9a03a8416db8945d5e37ab0406) Author: Michel Dänzer <mi...@tu...> Date: Tue Jan 2 10:05:48 2007 +0100 i915: Fix a DRM_ERROR that should be DRM_DEBUG. It would clutter up the kernel output in a situation which is legitimate before X.org 7.2 and handled correctly by the 3D driver. diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index a9bbf97..9772365 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -532,7 +532,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) if (!drm_get_drawable_info(dev, swap.drawable)) { spin_unlock_irqrestore(&dev->drw_lock, irqflags); - DRM_ERROR("Invalid drawable ID %d\n", swap.drawable); + DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); return DRM_ERR(EINVAL); } diff-tree 176b62991ad59e9a03a8416db8945d5e37ab0406 (from 972074b5d618575d9291de50ffe12f2f4ca01a20) Author: Michel Dänzer <mi...@tu...> Date: Tue Jan 2 10:03:56 2007 +0100 Make git ignore Emacs style backup files and cscope files. diff --git a/.gitignore b/.gitignore index 4386f40..d38beb7 100644 --- a/.gitignore +++ b/.gitignore @@ -49,6 +49,7 @@ bsd-core/via_map.c bsd-core/via_verifier.c bsd-core/via_verifier.h bsd-core/via_video.c +*~ *.flags *.ko *.ko.cmd @@ -74,6 +75,7 @@ config.log config.status config.sub configure +cscope.* depcomp device_if.h drm.kld diff-tree 972074b5d618575d9291de50ffe12f2f4ca01a20 (from 91855bb2540bbb824d4d5d437f3eb2d5d06c11ba) Author: Michel Dänzer <mi...@tu...> Date: Tue Jan 2 10:02:44 2007 +0100 linux-core: Make git ignore generated module symbol version files. diff --git a/linux-core/.gitignore b/linux-core/.gitignore new file mode 100644 index 0000000..1d045d6 --- /dev/null +++ b/linux-core/.gitignore @@ -0,0 +1 @@ +Module*.symvers |
From: <ma...@ke...> - 2007-01-13 22:22:19
|
linux-core/Makefile.kernel | 3 linux-core/nv10_graph.c | 1 linux-core/nv20_graph.c | 1 shared-core/nouveau_drv.h | 14 - shared-core/nouveau_fifo.c | 32 +- shared-core/nouveau_irq.c | 34 -- shared-core/nouveau_reg.h | 109 ++++++-- shared-core/nouveau_state.c | 6 shared-core/nv10_graph.c | 594 ++++++++++++++++++++++++++++++++++++++++++++ shared-core/nv20_graph.c | 155 +++++++++++ 10 files changed, 891 insertions(+), 58 deletions(-) New commits: diff-tree f04347f371c6c9c3a47550c6b7d26b7bd5629c85 (from cd5f543b2f3d6dd4c45f676c6fb9848b4d8a1c33) Author: Matthieu Castet <mat@mat-pc.(none)> Date: Sat Jan 13 23:19:41 2007 +0100 nouveau: nv20 graph ctx switch. Untested... diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 1bfffa0..b531a70 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -22,7 +22,8 @@ i830-objs := i830_drv.o i830_dma.o i83 i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ - nouveau_object.o nouveau_irq.o nv10_graph.o nv30_graph.o nv40_graph.o + nouveau_object.o nouveau_irq.o nv10_graph.o nv30_graph.o nv40_graph.o \ + nv20_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o sis-objs := sis_drv.o sis_mm.o ffb-objs := ffb_drv.o ffb_context.o diff --git a/linux-core/nv20_graph.c b/linux-core/nv20_graph.c new file mode 120000 index 0000000..7304991 --- /dev/null +++ b/linux-core/nv20_graph.c @@ -0,0 +1 @@ +../shared-core/nv20_graph.c \ No newline at end of file diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 9466cdc..4978c47 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -207,6 +207,11 @@ extern void nouveau_nv10_context_switch( extern int nv10_graph_init(drm_device_t *dev); extern int nv10_graph_context_create(drm_device_t *dev, int channel); +/* nv20_graph.c */ +extern void 
nouveau_nv20_context_switch(drm_device_t *dev); +extern int nv20_graph_init(drm_device_t *dev); +extern int nv20_graph_context_create(drm_device_t *dev, int channel); + /* nv30_graph.c */ extern int nv30_graph_init(drm_device_t *dev); extern int nv30_graph_context_create(drm_device_t *dev, int channel); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index cb94afc..1e2870f 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -550,15 +550,22 @@ static int nouveau_fifo_alloc(drm_device /* Construct inital RAMFC for new channel */ if (dev_priv->card_type < NV_10) { nouveau_nv04_context_init(dev, init); - } else if (dev_priv->card_type < NV_30) { + } else if (dev_priv->card_type < NV_20) { nv10_graph_context_create(dev, init->channel); nouveau_nv10_context_init(dev, init); - } else if (dev_priv->card_type < NV_40) { - ret = nv30_graph_context_create(dev, init->channel); - if (ret) { - nouveau_fifo_free(dev, init->channel); - return ret; - } + } else if (dev_priv->card_type < NV_30) { + ret = nv20_graph_context_create(dev, init->channel); + if (ret) { + nouveau_fifo_free(dev, init->channel); + return ret; + } + nouveau_nv10_context_init(dev, init); + } else if (dev_priv->card_type < NV_40) { + ret = nv30_graph_context_create(dev, init->channel); + if (ret) { + nouveau_fifo_free(dev, init->channel); + return ret; + } nouveau_nv30_context_init(dev, init); } else { ret = nv40_graph_context_create(dev, init->channel); @@ -652,6 +659,13 @@ void nouveau_fifo_free(drm_device_t* dev if (dev_priv->card_type >= NV_40) nouveau_instmem_free(dev, dev_priv->fifos[n].ramin_grctx); + else if (dev_priv->card_type >= NV_30) { + } + else if (dev_priv->card_type >= NV_20) { + /* clear ctx table */ + INSTANCE_WR(dev_priv->ctx_table, n, 0); + nouveau_instmem_free(dev, dev_priv->fifos[n].ramin_grctx); + } /* reenable the fifo caches */ NV_WRITE(NV_PFIFO_CACHES, 0x00000001); diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 
638b094..a92b816 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -329,6 +329,9 @@ static void nouveau_pgraph_irq_handler(d case NV_10: nouveau_nv10_context_switch(dev); break; + case NV_20: + nouveau_nv20_context_switch(dev); + break; default: DRM_INFO("NV: Context switch not implemented\n"); break; diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 8058e98..543be69 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -140,6 +140,8 @@ #define NV_PGRAPH_FIFO 0x00400720 #define NV_PGRAPH_BPIXEL 0x00400724 +#define NV_PGRAPH_RDI_INDEX 0x00400750 +#define NV_PGRAPH_RDI_DATA 0x00400754 #define NV_PGRAPH_FFINTFC_ST2 0x00400764 #define NV_PGRAPH_DMA_PITCH 0x00400770 #define NV_PGRAPH_DVD_COLORFMT 0x00400774 diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 6448512..f324c5f 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -96,7 +96,11 @@ int nouveau_firstopen(struct drm_device /* FIXME: doesn't belong here, and have no idea what it's for.. */ if (dev_priv->card_type >= NV_40) nv40_graph_init(dev); - else if (dev_priv->card_type == NV_10) + else if (dev_priv->card_type >= NV_30) { + } + else if (dev_priv->card_type >= NV_20) + nv20_graph_init(dev); + else if (dev_priv->card_type >= NV_10) nv10_graph_init(dev); return 0; diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c new file mode 100644 index 0000000..6d00eff --- /dev/null +++ b/shared-core/nv20_graph.c @@ -0,0 +1,155 @@ +/* + * Copyright 2007 Matthieu CASTET <cas...@fr...> + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +#define NV20_GRCTX_SIZE (3529) + +int nv20_graph_context_create(drm_device_t *dev, int channel) { + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + unsigned int ctx_size = NV20_GRCTX_SIZE; + int i; + + /* Alloc and clear RAMIN to store the context */ + chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4); + if (!chan->ramin_grctx) + return DRM_ERR(ENOMEM); + for (i=0; i<ctx_size; i+=4) + INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000); + + /* Initialise default context values */ + INSTANCE_WR(chan->ramin_grctx, 10, channel << 24); /* CTX_USER */ + + INSTANCE_WR(dev_priv->ctx_table, channel, nouveau_chip_instance_get(dev, chan->ramin_grctx)); + + return 0; +} + +static void nv20_graph_rdi(drm_device_t *dev) { + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + int i; + + NV_WRITE(NV_PGRAPH_RDI_INDEX, 0x2c80000); + for (i = 0; i < 32; i++) + NV_WRITE(NV_PGRAPH_RDI_DATA, 0); + + nouveau_wait_for_idle(dev); +} + +/* Save current context (from PGRAPH) into the channel's context + */ +static void nv20_graph_context_save_current(drm_device_t *dev, int channel) { + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + uint32_t instance; + + instance = INSTANCE_RD(dev_priv->ctx_table, channel); + if (!instance) { + return; + } + if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx)) + DRM_ERROR("nv20_graph_context_save_current : bad instance\n"); + + NV_WRITE(NV_PGRAPH_CHANNEL_CTX_SIZE, instance); + NV_WRITE(NV_PGRAPH_CHANNEL_CTX_POINTER, 2 /* save ctx */); +} + + +/* Restore the context for a specific channel into PGRAPH + */ +static void nv20_graph_context_restore(drm_device_t *dev, int channel) { + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + uint32_t 
instance; + + instance = INSTANCE_RD(dev_priv->ctx_table, channel); + if (!instance) { + return; + } + if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx)) + DRM_ERROR("nv20_graph_context_restore_current : bad instance\n"); + + NV_WRITE(NV_PGRAPH_CTX_USER, channel << 24); + NV_WRITE(NV_PGRAPH_CHANNEL_CTX_SIZE, instance); + NV_WRITE(NV_PGRAPH_CHANNEL_CTX_POINTER, 1 /* restore ctx */); +} + +void nouveau_nv20_context_switch(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int channel, channel_old; + + channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); + channel_old = (NV_READ(NV_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + + DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel); + + NV_WRITE(NV_PGRAPH_FIFO,0x0); + + nv20_graph_context_save_current(dev, channel_old); + + nouveau_wait_for_idle(dev); + + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10000000); + + nv20_graph_context_restore(dev, channel_old); + + nouveau_wait_for_idle(dev); + + if ((NV_READ(NV_PGRAPH_CTX_USER) >> 24) != channel) + DRM_ERROR("nouveau_nv20_context_switch : wrong channel restored %x %x!!!\n", channel, NV_READ(NV_PGRAPH_CTX_USER) >> 24); + + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10010100); + NV_WRITE(NV_PGRAPH_FFINTFC_ST2, NV_READ(NV_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); + + NV_WRITE(NV_PGRAPH_FIFO,0x1); +} + +int nv20_graph_init(drm_device_t *dev) { + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + int i; + + /* Create Context Pointer Table */ + dev_priv->ctx_table_size = 32 * 4; + dev_priv->ctx_table = nouveau_instmem_alloc(dev, dev_priv->ctx_table_size, 4); + if (!dev_priv->ctx_table) + return DRM_ERR(ENOMEM); + + for (i=0; i< dev_priv->ctx_table_size; i+=4) + INSTANCE_WR(dev_priv->ctx_table, i/4, 0x00000000); + + NV_WRITE(NV_PGRAPH_CHANNEL_CTX_TABLE, nouveau_chip_instance_get(dev, dev_priv->ctx_table)); + + //XXX need to be done and save/restore for 
each fifo ??? + nv20_graph_rdi(dev); + + return 0; +} diff-tree cd5f543b2f3d6dd4c45f676c6fb9848b4d8a1c33 (from 4ae64a1b583be3ef13338e8029e7e9efe21f2c2f) Author: Matthieu Castet <mat@mat-pc.(none)> Date: Sat Jan 13 21:43:47 2007 +0100 nouveau: first step to make graph ctx works It is still not working, but now we could use some 3D commands without needed to run nvidia blob before. diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 30df582..1bfffa0 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -22,7 +22,7 @@ i830-objs := i830_drv.o i830_dma.o i83 i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ - nouveau_object.o nouveau_irq.o nv30_graph.o nv40_graph.o + nouveau_object.o nouveau_irq.o nv10_graph.o nv30_graph.o nv40_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o sis-objs := sis_drv.o sis_mm.o ffb-objs := ffb_drv.o ffb_context.o diff --git a/linux-core/nv10_graph.c b/linux-core/nv10_graph.c new file mode 120000 index 0000000..0d5a0eb --- /dev/null +++ b/linux-core/nv10_graph.c @@ -0,0 +1 @@ +../shared-core/nv10_graph.c \ No newline at end of file diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 219ba12..9466cdc 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -90,8 +90,8 @@ struct nouveau_fifo /* objects belonging to this fifo */ struct nouveau_object *objs; - /* XXX move this in PGRAPH struct */ - uint32_t pgraph_ctx_user; + /* XXX dynamic alloc ? 
*/ + uint32_t nv10_pgraph_ctx [340]; }; struct nouveau_config { @@ -202,6 +202,11 @@ extern void nouveau_irq_preinstal extern void nouveau_irq_postinstall(drm_device_t*); extern void nouveau_irq_uninstall(drm_device_t*); +/* nv10_graph.c */ +extern void nouveau_nv10_context_switch(drm_device_t *dev); +extern int nv10_graph_init(drm_device_t *dev); +extern int nv10_graph_context_create(drm_device_t *dev, int channel); + /* nv30_graph.c */ extern int nv30_graph_init(drm_device_t *dev); extern int nv30_graph_context_create(drm_device_t *dev, int channel); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 98a1344..cb94afc 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -538,7 +538,6 @@ static int nouveau_fifo_alloc(drm_device init->channel = i; init->put_base = 0; dev_priv->cur_fifo = init->channel; - dev_priv->fifos[i].pgraph_ctx_user = i << 24; nouveau_wait_for_idle(dev); @@ -551,8 +550,9 @@ static int nouveau_fifo_alloc(drm_device /* Construct inital RAMFC for new channel */ if (dev_priv->card_type < NV_10) { nouveau_nv04_context_init(dev, init); - } else if (dev_priv->card_type < NV_30) { - nouveau_nv10_context_init(dev, init); + } else if (dev_priv->card_type < NV_30) { + nv10_graph_context_create(dev, init->channel); + nouveau_nv10_context_init(dev, init); } else if (dev_priv->card_type < NV_40) { ret = nv30_graph_context_create(dev, init->channel); if (ret) { diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 7a31fb0..638b094 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -237,37 +237,6 @@ static void nouveau_nv04_context_switch( } -static void nouveau_nv10_context_switch(drm_device_t *dev) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - int channel, channel_old; - - channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); - channel_old = (NV_READ(NV_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); - - DRM_INFO("NV: PGRAPH context 
switch interrupt channel %x -> %x\n",channel_old, channel); - - NV_WRITE(NV_PGRAPH_FIFO,0x0); - NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); - NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000); - NV_WRITE(NV_PFIFO_CACHES, 0x00000000); - - dev_priv->fifos[channel_old].pgraph_ctx_user = NV_READ(NV_PGRAPH_CTX_USER); - //XXX save PGRAPH context - NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10000000); - NV_WRITE(NV_PGRAPH_CTX_USER, dev_priv->fifos[channel].pgraph_ctx_user); - //XXX restore PGRAPH context - printk("ctx_user %x %x\n", dev_priv->fifos[channel_old].pgraph_ctx_user, dev_priv->fifos[channel].pgraph_ctx_user); - - NV_WRITE(NV_PGRAPH_FFINTFC_ST2, NV_READ(NV_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); - NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10010100); - - NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); - NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); - NV_WRITE(NV_PFIFO_CACHES, 0x00000001); - NV_WRITE(NV_PGRAPH_FIFO,0x1); -} - static void nouveau_pgraph_irq_handler(drm_device_t *dev) { uint32_t status; diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index 1e0587f..6448512 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -96,6 +96,8 @@ int nouveau_firstopen(struct drm_device /* FIXME: doesn't belong here, and have no idea what it's for.. */ if (dev_priv->card_type >= NV_40) nv40_graph_init(dev); + else if (dev_priv->card_type == NV_10) + nv10_graph_init(dev); return 0; } diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c new file mode 100644 index 0000000..ccbb34d --- /dev/null +++ b/shared-core/nv10_graph.c @@ -0,0 +1,594 @@ +/* + * Copyright 2007 Matthieu CASTET <cas...@fr...> + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" + + +static void nv10_praph_pipe(drm_device_t *dev) { + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + nouveau_wait_for_idle(dev); + /* XXX check haiku comments */ + NV_WRITE(NV_PGRAPH_XFMODE0, 0x10000000); + NV_WRITE(NV_PGRAPH_XFMODE1, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x000064c0); + for (i = 0; i < 4; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f800000); + for (i = 0; i < 4; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00006ab0); + + for (i = 0; i < 3; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f800000); + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00006a80); + for (i = 0; i < 3; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00000040); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000008); + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00000200); + for (i = 0; i < 48; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + nouveau_wait_for_idle(dev); + + NV_WRITE(NV_PGRAPH_XFMODE0, 0x00000000); + NV_WRITE(NV_PGRAPH_XFMODE1, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00006400); + for (i = 0; i < 211; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f800000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x40000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x40000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x40000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x40000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f800000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f800000); + 
NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f800000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f800000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f800000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f800000); + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00006800); + for (i = 0; i < 162; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x3f800000); + for (i = 0; i < 25; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00006c00); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0xbf800000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00007000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x7149f2ca); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 
0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x7149f2ca); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x7149f2ca); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x7149f2ca); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x7149f2ca); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x7149f2ca); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x7149f2ca); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x7149f2ca); + for (i = 0; i < 35; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00007400); + for (i = 0; i < 48; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00007800); + for (i = 0; i < 48; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00004400); + for (i = 0; i < 32; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00000000); + for (i = 0; i < 16; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + NV_WRITE(NV_PGRAPH_PIPE_ADDRESS, 0x00000040); + for (i = 0; i < 4; i++) + NV_WRITE(NV_PGRAPH_PIPE_DATA, 0x00000000); + + nouveau_wait_for_idle(dev); +} + +/* TODO replace address with name + use loops */ +static int nv10_graph_ctx_regs [] = { +NV_PGRAPH_XY_LOGIC_MISC0, +NV_PGRAPH_DEBUG_4, 
+0x004006b0, + +NV_PGRAPH_CTX_SWITCH1, +NV_PGRAPH_CTX_SWITCH2, +NV_PGRAPH_CTX_SWITCH3, +NV_PGRAPH_CTX_SWITCH4, +NV_PGRAPH_CTX_SWITCH5, +0x00400160, +0x00400180, +0x004001a0, +0x004001c0, +0x004001e0, +0x00400164, +0x00400184, +0x004001a4, +0x004001c4, +0x004001e4, +0x00400168, +0x00400188, +0x004001a8, +0x004001c8, +0x004001e8, +0x0040016c, +0x0040018c, +0x004001ac, +0x004001cc, +0x004001ec, +0x00400170, +0x00400190, +0x004001b0, +0x004001d0, +0x004001f0, +0x00400174, +0x00400194, +0x004001b4, +0x004001d4, +0x004001f4, +0x00400178, +0x00400198, +0x004001b8, +0x004001d8, +0x004001f8, +0x0040017c, +0x0040019c, +0x004001bc, +0x004001dc, +0x004001fc, +NV_PGRAPH_CTX_USER, +NV_PGRAPH_DMA_START_0, +NV_PGRAPH_DMA_START_1, +NV_PGRAPH_DMA_LENGTH, +NV_PGRAPH_DMA_MISC, +NV_PGRAPH_DMA_PITCH, +NV_PGRAPH_BOFFSET0, +NV_PGRAPH_BBASE0, +NV_PGRAPH_BLIMIT0, +NV_PGRAPH_BOFFSET1, +NV_PGRAPH_BBASE1, +NV_PGRAPH_BLIMIT1, +NV_PGRAPH_BOFFSET2, +NV_PGRAPH_BBASE2, +NV_PGRAPH_BLIMIT2, +NV_PGRAPH_BOFFSET3, +NV_PGRAPH_BBASE3, +NV_PGRAPH_BLIMIT3, +NV_PGRAPH_BOFFSET4, +NV_PGRAPH_BBASE4, +NV_PGRAPH_BLIMIT4, +NV_PGRAPH_BOFFSET5, +NV_PGRAPH_BBASE5, +NV_PGRAPH_BLIMIT5, +NV_PGRAPH_BPITCH0, +NV_PGRAPH_BPITCH1, +NV_PGRAPH_BPITCH2, +NV_PGRAPH_BPITCH3, +NV_PGRAPH_BPITCH4, +NV_PGRAPH_SURFACE, +NV_PGRAPH_STATE, +NV_PGRAPH_BSWIZZLE2, +NV_PGRAPH_BSWIZZLE5, +NV_PGRAPH_BPIXEL, +NV_PGRAPH_NOTIFY, +NV_PGRAPH_PATT_COLOR0, +NV_PGRAPH_PATT_COLOR1, +0x00400900, +0x00400904, +0x00400908, +0x0040090c, +0x00400910, +0x00400914, +0x00400918, +0x0040091c, +0x00400920, +0x00400924, +0x00400928, +0x0040092c, +0x00400930, +0x00400934, +0x00400938, +0x0040093c, +0x00400940, +0x00400944, +0x00400948, +0x0040094c, +0x00400950, +0x00400954, +0x00400958, +0x0040095c, +0x00400960, +0x00400964, +0x00400968, +0x0040096c, +0x00400970, +0x00400974, +0x00400978, +0x0040097c, +0x00400980, +0x00400984, +0x00400988, +0x0040098c, +0x00400990, +0x00400994, +0x00400998, +0x0040099c, +0x004009a0, +0x004009a4, +0x004009a8, +0x004009ac, 
+0x004009b0, +0x004009b4, +0x004009b8, +0x004009bc, +0x004009c0, +0x004009c4, +0x004009c8, +0x004009cc, +0x004009d0, +0x004009d4, +0x004009d8, +0x004009dc, +0x004009e0, +0x004009e4, +0x004009e8, +0x004009ec, +0x004009f0, +0x004009f4, +0x004009f8, +0x004009fc, +0x00400808, +0x0040080c, +NV_PGRAPH_PATTERN_SHAPE, +NV_PGRAPH_MONO_COLOR0, +NV_PGRAPH_ROP3, +NV_PGRAPH_CHROMA, +NV_PGRAPH_BETA_AND, +NV_PGRAPH_BETA_PREMULT, +0x00400e70, +0x00400e74, +0x00400e78, +0x00400e7c, +0x00400e80, +0x00400e84, +0x00400e88, +0x00400e8c, +0x00400ea0, +0x00400ea4, +0x00400ea8, +0x00400eac, +0x00400eb0, +0x00400eb4, +0x00400eb8, +0x00400ebc, +0x00400ec0, +0x00400ec4, +0x00400ec8, +0x00400ecc, +0x00400ed0, +0x00400ed4, +0x00400ed8, +0x00400edc, +0x00400ee0, +0x00400a00, +0x00400a04, +0x00400e90, +0x00400e94, +0x00400e98, +0x00400e9c, +0x00400f00, +0x00400f20, +0x00400f04, +0x00400f24, +0x00400f08, +0x00400f28, +0x00400f0c, +0x00400f2c, +0x00400f10, +0x00400f30, +0x00400f14, +0x00400f34, +0x00400f18, +0x00400f38, +0x00400f1c, +0x00400f3c, +NV_PGRAPH_XFMODE0, +NV_PGRAPH_XFMODE1, +NV_PGRAPH_GLOBALSTATE0, +NV_PGRAPH_GLOBALSTATE1, +NV_PGRAPH_STORED_FMT, +NV_PGRAPH_SOURCE_COLOR, +0x00400400, +0x00400480, +0x00400404, +0x00400484, +0x00400408, +0x00400488, +0x0040040c, +0x0040048c, +0x00400410, +0x00400490, +0x00400414, +0x00400494, +0x00400418, +0x00400498, +0x0040041c, +0x0040049c, +0x00400420, +0x004004a0, +0x00400424, +0x004004a4, +0x00400428, +0x004004a8, +0x0040042c, +0x004004ac, +0x00400430, +0x004004b0, +0x00400434, +0x004004b4, +0x00400438, +0x004004b8, +0x0040043c, +0x004004bc, +0x00400440, +0x004004c0, +0x00400444, +0x004004c4, +0x00400448, +0x004004c8, +0x0040044c, +0x004004cc, +0x00400450, +0x004004d0, +0x00400454, +0x004004d4, +0x00400458, +0x004004d8, +0x0040045c, +0x004004dc, +0x00400460, +0x004004e0, +0x00400464, +0x004004e4, +0x00400468, +0x004004e8, +0x0040046c, +0x004004ec, +0x00400470, +0x004004f0, +0x00400474, +0x004004f4, +0x00400478, +0x004004f8, +0x0040047c, +0x004004fc, 
+NV_PGRAPH_ABS_UCLIP_XMIN, +NV_PGRAPH_ABS_UCLIP_XMAX, +NV_PGRAPH_ABS_UCLIP_YMIN, +NV_PGRAPH_ABS_UCLIP_YMAX, +0x00400550, +0x00400558, +0x00400554, +0x0040055c, +NV_PGRAPH_ABS_UCLIPA_XMIN, +NV_PGRAPH_ABS_UCLIPA_XMAX, +NV_PGRAPH_ABS_UCLIPA_YMIN, +NV_PGRAPH_ABS_UCLIPA_YMAX, +NV_PGRAPH_ABS_ICLIP_XMAX, +NV_PGRAPH_ABS_ICLIP_YMAX, +NV_PGRAPH_XY_LOGIC_MISC1, +NV_PGRAPH_XY_LOGIC_MISC2, +NV_PGRAPH_XY_LOGIC_MISC3, +NV_PGRAPH_CLIPX_0, +NV_PGRAPH_CLIPX_1, +NV_PGRAPH_CLIPY_0, +NV_PGRAPH_CLIPY_1, +0x00400e40, +0x00400e44, +0x00400e48, +0x00400e4c, +0x00400e50, +0x00400e54, +0x00400e58, +0x00400e5c, +0x00400e60, +0x00400e64, +0x00400e68, +0x00400e6c, +0x00400e00, +0x00400e04, +0x00400e08, +0x00400e0c, +0x00400e10, +0x00400e14, +0x00400e18, +0x00400e1c, +0x00400e20, +0x00400e24, +0x00400e28, +0x00400e2c, +0x00400e30, +0x00400e34, +0x00400e38, +0x00400e3c, +NV_PGRAPH_PASSTHRU_0, +NV_PGRAPH_PASSTHRU_1, +NV_PGRAPH_PASSTHRU_2, +NV_PGRAPH_DIMX_TEXTURE, +NV_PGRAPH_WDIMX_TEXTURE, +NV_PGRAPH_DVD_COLORFMT, +NV_PGRAPH_SCALED_FORMAT, +NV_PGRAPH_MISC24_0, +NV_PGRAPH_MISC24_1, +NV_PGRAPH_MISC24_2, +NV_PGRAPH_X_MISC, +NV_PGRAPH_Y_MISC, +NV_PGRAPH_VALID1, +NV_PGRAPH_VALID2, +0 +}; + +void nouveau_nv10_context_switch(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int channel, channel_old, i; + + channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); + channel_old = (NV_READ(NV_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + + DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel); + + NV_WRITE(NV_PGRAPH_FIFO,0x0); +#if 0 + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000); + NV_WRITE(NV_PFIFO_CACHES, 0x00000000); +#endif + + // save PGRAPH context + for (i = 0; nv10_graph_ctx_regs[i]; i++) + dev_priv->fifos[channel_old].nv10_pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]); + + nouveau_wait_for_idle(dev); + + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10000000); + 
NV_WRITE(NV_PGRAPH_CTX_USER, (NV_READ(NV_PGRAPH_CTX_USER) & 0xffffff) | (0x1f << 24)); + + nouveau_wait_for_idle(dev); + // restore PGRAPH context + //XXX not working yet +#if 0 + for (i = 0; nv10_graph_ctx_regs[i]; i++) + NV_WRITE(nv10_graph_ctx_regs[i], dev_priv->fifos[channel].nv10_pgraph_ctx[i]); + nouveau_wait_for_idle(dev); +#endif + + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10010100); + NV_WRITE(NV_PGRAPH_CTX_USER, channel << 24); + NV_WRITE(NV_PGRAPH_FFINTFC_ST2, NV_READ(NV_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); + +#if 0 + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); + NV_WRITE(NV_PFIFO_CACHES, 0x00000001); +#endif + NV_WRITE(NV_PGRAPH_FIFO,0x1); +} + +int nv10_graph_context_create(drm_device_t *dev, int channel) { + drm_nouveau_private_t *dev_priv = dev->dev_private; + DRM_DEBUG("nv10_graph_context_create %d\n", channel); + + memset(dev_priv->fifos[channel].nv10_pgraph_ctx, 0, sizeof(dev_priv->fifos[channel].nv10_pgraph_ctx)); + + //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; + dev_priv->fifos[channel].nv10_pgraph_ctx[0] = 0x0001ffff; + /* is it really needed ??? 
*/ + dev_priv->fifos[channel].nv10_pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4); + dev_priv->fifos[channel].nv10_pgraph_ctx[2] = NV_READ(0x004006b0); + return 0; +} + + +int nv10_graph_init(drm_device_t *dev) { + //XXX should be call at each fifo init + nv10_praph_pipe(dev); + return 0; +} diff-tree 4ae64a1b583be3ef13338e8029e7e9efe21f2c2f (from 1967aa82cfc18c422360ef544b66e316d98f53a1) Author: Matthieu Castet <mat@mat-pc.(none)> Date: Sat Jan 13 21:41:33 2007 +0100 nouveau: add and indent pgraph regs diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index a1d189f..8058e98 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -55,6 +55,7 @@ #define NV_PMC_INTEN 0x00000140 # define NV_PMC_INTEN_MASTER_ENABLE (1<< 0) +#define NV_PGRAPH_DEBUG_4 0x00400090 #define NV_PGRAPH_INTSTAT 0x00400100 #define NV04_PGRAPH_INTEN 0x00400140 #define NV40_PGRAPH_INTEN 0x0040013C @@ -68,23 +69,99 @@ # define NV_PGRAPH_NV40_UNK220_FB_INSTANCE #define NV_PGRAPH_CTX_USER 0x00400148 #define NV_PGRAPH_CTX_SWITCH1 0x0040014C +#define NV_PGRAPH_CTX_SWITCH2 0x00400150 +#define NV_PGRAPH_CTX_SWITCH3 0x00400154 +#define NV_PGRAPH_CTX_SWITCH4 0x00400158 +#define NV_PGRAPH_CTX_SWITCH5 0x0040015C +#define NV_PGRAPH_X_MISC 0x00400500 +#define NV_PGRAPH_Y_MISC 0x00400504 +#define NV_PGRAPH_VALID1 0x00400508 +#define NV_PGRAPH_SOURCE_COLOR 0x0040050C +#define NV_PGRAPH_MISC24_0 0x00400510 +#define NV_PGRAPH_XY_LOGIC_MISC0 0x00400514 +#define NV_PGRAPH_XY_LOGIC_MISC1 0x00400518 +#define NV_PGRAPH_XY_LOGIC_MISC2 0x0040051C +#define NV_PGRAPH_XY_LOGIC_MISC3 0x00400520 +#define NV_PGRAPH_CLIPX_0 0x00400524 +#define NV_PGRAPH_CLIPX_1 0x00400528 +#define NV_PGRAPH_CLIPY_0 0x0040052C +#define NV_PGRAPH_CLIPY_1 0x00400530 +#define NV_PGRAPH_ABS_ICLIP_XMAX 0x00400534 +#define NV_PGRAPH_ABS_ICLIP_YMAX 0x00400538 +#define NV_PGRAPH_ABS_UCLIP_XMIN 0x0040053C +#define NV_PGRAPH_ABS_UCLIP_YMIN 0x00400540 +#define NV_PGRAPH_ABS_UCLIP_XMAX 0x00400544 +#define 
NV_PGRAPH_ABS_UCLIP_YMAX 0x00400548 +#define NV_PGRAPH_ABS_UCLIPA_XMIN 0x00400560 +#define NV_PGRAPH_ABS_UCLIPA_YMIN 0x00400564 +#define NV_PGRAPH_ABS_UCLIPA_XMAX 0x00400568 +#define NV_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C +#define NV_PGRAPH_MISC24_1 0x00400570 +#define NV_PGRAPH_MISC24_2 0x00400574 +#define NV_PGRAPH_VALID2 0x00400578 +#define NV_PGRAPH_PASSTHRU_0 0x0040057C +#define NV_PGRAPH_PASSTHRU_1 0x00400580 +#define NV_PGRAPH_PASSTHRU_2 0x00400584 +#define NV_PGRAPH_DIMX_TEXTURE 0x00400588 +#define NV_PGRAPH_WDIMX_TEXTURE 0x0040058C +#define NV_PGRAPH_MONO_COLOR0 0x00400600 +#define NV_PGRAPH_ROP3 0x00400604 +#define NV_PGRAPH_BETA_AND 0x00400608 +#define NV_PGRAPH_BETA_PREMULT 0x0040060C +#define NV_PGRAPH_BOFFSET0 0x00400640 +#define NV_PGRAPH_BOFFSET1 0x00400644 +#define NV_PGRAPH_BOFFSET2 0x00400648 +#define NV_PGRAPH_BOFFSET3 0x0040064C +#define NV_PGRAPH_BOFFSET4 0x00400650 +#define NV_PGRAPH_BOFFSET5 0x00400654 +#define NV_PGRAPH_BBASE0 0x00400658 +#define NV_PGRAPH_BBASE1 0x0040065C +#define NV_PGRAPH_BBASE2 0x00400660 +#define NV_PGRAPH_BBASE3 0x00400664 +#define NV_PGRAPH_BBASE4 0x00400668 +#define NV_PGRAPH_BBASE5 0x0040066C +#define NV_PGRAPH_BPITCH0 0x00400670 +#define NV_PGRAPH_BPITCH1 0x00400674 +#define NV_PGRAPH_BPITCH2 0x00400678 +#define NV_PGRAPH_BPITCH3 0x0040067C +#define NV_PGRAPH_BPITCH4 0x00400680 +#define NV_PGRAPH_BLIMIT0 0x00400684 +#define NV_PGRAPH_BLIMIT1 0x00400688 +#define NV_PGRAPH_BLIMIT2 0x0040068C +#define NV_PGRAPH_BLIMIT3 0x00400690 +#define NV_PGRAPH_BLIMIT4 0x00400694 +#define NV_PGRAPH_BLIMIT5 0x00400698 +#define NV_PGRAPH_BSWIZZLE2 0x0040069C +#define NV_PGRAPH_BSWIZZLE5 0x004006A0 +#define NV_PGRAPH_SURFACE 0x00400710 +#define NV_PGRAPH_STATE 0x00400714 +#define NV_PGRAPH_NOTIFY 0x00400718 + #define NV_PGRAPH_FIFO 0x00400720 -#define NV_PGRAPH_FFINTFC_ST2 0x00400764 -/* NV-Register NV_PGRAPH_CHANNEL_CTX_TABLE */ -#define NV_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 -#define NV_PGRAPH_CHANNEL_CTX_TABLE_INST 0x0000FFFF 
-#define NV_PGRAPH_CHANNEL_CTX_TABLE_INST_0 0x00000000 - -/* NV-Register NV_PGRAPH_CHANNEL_CTX_SIZE */ -#define NV_PGRAPH_CHANNEL_CTX_SIZE 0x00400784 -#define NV_PGRAPH_CHANNEL_CTX_SIZE_VALUE 0x0000FFFF -#define NV_PGRAPH_CHANNEL_CTX_SIZE_VALUE_INIT 0x00001000 - -/* NV-Register NV_PGRAPH_CHANNEL_CTX_POINTER */ -#define NV_PGRAPH_CHANNEL_CTX_POINTER 0x00400788 -#define NV_PGRAPH_CHANNEL_CTX_POINTER_INST 0x0000FFFF -#define NV_PGRAPH_CHANNEL_CTX_POINTER_INST_0 0x00000000 +#define NV_PGRAPH_BPIXEL 0x00400724 +#define NV_PGRAPH_FFINTFC_ST2 0x00400764 +#define NV_PGRAPH_DMA_PITCH 0x00400770 +#define NV_PGRAPH_DVD_COLORFMT 0x00400774 +#define NV_PGRAPH_SCALED_FORMAT 0x00400778 +#define NV_PGRAPH_CHANNEL_CTX_TABLE 0x00400780 +#define NV_PGRAPH_CHANNEL_CTX_SIZE 0x00400784 +#define NV_PGRAPH_CHANNEL_CTX_POINTER 0x00400788 +#define NV_PGRAPH_PATT_COLOR0 0x00400800 +#define NV_PGRAPH_PATT_COLOR1 0x00400804 +#define NV_PGRAPH_PATTERN_SHAPE 0x00400810 +#define NV_PGRAPH_CHROMA 0x00400814 +#define NV_PGRAPH_STORED_FMT 0x00400830 +#define NV_PGRAPH_XFMODE0 0x00400F40 +#define NV_PGRAPH_XFMODE1 0x00400F44 +#define NV_PGRAPH_GLOBALSTATE0 0x00400F48 +#define NV_PGRAPH_GLOBALSTATE1 0x00400F4C +#define NV_PGRAPH_PIPE_ADDRESS 0x00400F50 +#define NV_PGRAPH_PIPE_DATA 0x00400F54 +#define NV_PGRAPH_DMA_START_0 0x00401000 +#define NV_PGRAPH_DMA_START_1 0x00401004 +#define NV_PGRAPH_DMA_LENGTH 0x00401008 +#define NV_PGRAPH_DMA_MISC 0x0040100C /* It's a guess that this works on NV03. Confirmed on NV04, though */ #define NV_PFIFO_DELAY_0 0x00002040 |
From: <th...@ke...> - 2007-01-29 12:37:35
|
linux-core/drmP.h | 7 - linux-core/drm_bo.c | 211 +++++++++++++++++----------------------------------- 2 files changed, 75 insertions(+), 143 deletions(-) New commits: diff-tree 9a654e71bda3530f6d18d115729af27cc15033de (from 45418bb1b1a0fac38f0dda7e29022bfb4cae3d03) Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> Date: Mon Jan 29 13:36:17 2007 +0100 Use pre-defined list_splice function. diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 1312783..cd0f476 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -381,7 +381,8 @@ int drm_fence_buffer_objects(drm_file_t uint32_t fence_type = 0; int count = 0; int ret = 0; - struct list_head f_list, *l; + struct list_head *l; + LIST_HEAD(f_list); mutex_lock(&dev->struct_mutex); @@ -411,8 +412,7 @@ int drm_fence_buffer_objects(drm_file_t * the ones we already have.. */ - list_add_tail(&f_list, list); - list_del_init(list); + list_splice_init(list, &f_list); if (fence) { if ((fence_type & fence->type) != fence_type) { diff-tree 45418bb1b1a0fac38f0dda7e29022bfb4cae3d03 (from 1e4c7d69f5b55f5299e5b0c220e4af1dfb21f69d) Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> Date: Mon Jan 29 13:19:20 2007 +0100 s/buf/bo/ for consistency. diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 61db102..1312783 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -68,28 +68,28 @@ static inline uint32_t drm_bo_type_flags * bo locked. dev->struct_mutex locked. 
*/ -static void drm_bo_add_to_lru(drm_buffer_object_t * buf, +static void drm_bo_add_to_lru(drm_buffer_object_t * bo, drm_buffer_manager_t * bm) { struct list_head *list; - buf->mem_type = 0; + bo->mem_type = 0; - switch(buf->flags & DRM_BO_MASK_MEM) { + switch(bo->flags & DRM_BO_MASK_MEM) { case DRM_BO_FLAG_MEM_TT: - buf->mem_type = DRM_BO_MEM_TT; + bo->mem_type = DRM_BO_MEM_TT; break; case DRM_BO_FLAG_MEM_VRAM: - buf->mem_type = DRM_BO_MEM_VRAM; + bo->mem_type = DRM_BO_MEM_VRAM; break; case DRM_BO_FLAG_MEM_LOCAL: - buf->mem_type = DRM_BO_MEM_LOCAL; + bo->mem_type = DRM_BO_MEM_LOCAL; break; default: BUG_ON(1); } - list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? - &bm->pinned[buf->mem_type] : &bm->lru[buf->mem_type]; - list_add_tail(&buf->lru, list); + list = (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? + &bm->pinned[bo->mem_type] : &bm->lru[bo->mem_type]; + list_add_tail(&bo->lru, list); return; } @@ -97,18 +97,18 @@ static void drm_bo_add_to_lru(drm_buffer * bo locked. 
*/ -static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict, +static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict, int force_no_move) { - drm_device_t *dev = buf->dev; + drm_device_t *dev = bo->dev; int ret; - if (buf->mm_node) { + if (bo->mm_node) { mutex_lock(&dev->struct_mutex); if (evict) - ret = drm_evict_ttm(buf->ttm); + ret = drm_evict_ttm(bo->ttm); else - ret = drm_unbind_ttm(buf->ttm); + ret = drm_unbind_ttm(bo->ttm); if (ret) { mutex_unlock(&dev->struct_mutex); @@ -117,15 +117,15 @@ static int drm_move_tt_to_local(drm_buff return ret; } - if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { - drm_mm_put_block(buf->mm_node); - buf->mm_node = NULL; + if (!(bo->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { + drm_mm_put_block(bo->mm_node); + bo->mm_node = NULL; } mutex_unlock(&dev->struct_mutex); } - buf->flags &= ~DRM_BO_FLAG_MEM_TT; - buf->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + bo->flags &= ~DRM_BO_FLAG_MEM_TT; + bo->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; return 0; } @@ -513,24 +513,24 @@ static int drm_bo_evict(drm_buffer_objec } /* - * buf->mutex locked. + * bo->mutex locked. 
*/ -int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type, +int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type, int no_wait) { - drm_device_t *dev = buf->dev; + drm_device_t *dev = bo->dev; drm_mm_node_t *node; drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *bo; + drm_buffer_object_t *entry; drm_mm_t *mm = &bm->manager[mem_type]; struct list_head *lru; - unsigned long size = buf->num_pages; + unsigned long size = bo->num_pages; int ret; mutex_lock(&dev->struct_mutex); do { - node = drm_mm_search_free(mm, size, buf->page_alignment, 1); + node = drm_mm_search_free(mm, size, bo->page_alignment, 1); if (node) break; @@ -538,15 +538,15 @@ int drm_bo_alloc_space(drm_buffer_object if (lru->next == lru) break; - bo = list_entry(lru->next, drm_buffer_object_t, lru); + entry = list_entry(lru->next, drm_buffer_object_t, lru); - atomic_inc(&bo->usage); + atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); - mutex_lock(&bo->mutex); + mutex_lock(&entry->mutex); BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE); - ret = drm_bo_evict(bo, mem_type, no_wait, 0); - mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(dev, bo); + ret = drm_bo_evict(entry, mem_type, no_wait, 0); + mutex_unlock(&entry->mutex); + drm_bo_usage_deref_unlocked(dev, entry); if (ret) return ret; mutex_lock(&dev->struct_mutex); @@ -558,13 +558,13 @@ int drm_bo_alloc_space(drm_buffer_object return -ENOMEM; } - node = drm_mm_get_block(node, size, buf->page_alignment); + node = drm_mm_get_block(node, size, bo->page_alignment); mutex_unlock(&dev->struct_mutex); BUG_ON(!node); - node->private = (void *)buf; + node->private = (void *)bo; - buf->mm_node = node; - buf->offset = node->start * PAGE_SIZE; + bo->mm_node = node; + bo->offset = node->start * PAGE_SIZE; return 0; } diff-tree 1e4c7d69f5b55f5299e5b0c220e4af1dfb21f69d (from ee4ac5c897faa499ad24c148b4f065bc770b529d) Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> Date: Mon Jan 29 13:11:55 2007 +0100 Some 
cleanup. A buffer object should only have one active memory type. diff --git a/linux-core/drmP.h b/linux-core/drmP.h index ff3fc67..9c748e6 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -999,11 +999,10 @@ typedef struct drm_buffer_object{ atomic_t mapped; uint32_t flags; uint32_t mask; + uint32_t mem_type; - drm_mm_node_t *node_ttm; /* MM node for on-card RAM */ - drm_mm_node_t *node_card; /* MM node for ttm*/ - struct list_head lru_ttm; /* LRU for the ttm pages*/ - struct list_head lru_card; /* For memory types with on-card RAM */ + drm_mm_node_t *mm_node; /* MM node for on-card RAM */ + struct list_head lru; struct list_head ddestroy; uint32_t fence_type; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index c0e431b..61db102 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -64,38 +64,6 @@ static inline uint32_t drm_bo_type_flags return (1 << (24 + type)); } -static inline drm_buffer_object_t *drm_bo_entry(struct list_head *list, - unsigned type) -{ - switch (type) { - case DRM_BO_MEM_LOCAL: - case DRM_BO_MEM_TT: - return list_entry(list, drm_buffer_object_t, lru_ttm); - case DRM_BO_MEM_VRAM: - case DRM_BO_MEM_VRAM_NM: - return list_entry(list, drm_buffer_object_t, lru_card); - default: - BUG_ON(1); - } - return NULL; -} - -static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t * bo, - unsigned type) -{ - switch (type) { - case DRM_BO_MEM_LOCAL: - case DRM_BO_MEM_TT: - return bo->node_ttm; - case DRM_BO_MEM_VRAM: - case DRM_BO_MEM_VRAM_NM: - return bo->node_card; - default: - BUG_ON(1); - } - return NULL; -} - /* * bo locked. dev->struct_mutex locked. */ @@ -104,31 +72,25 @@ static void drm_bo_add_to_lru(drm_buffer drm_buffer_manager_t * bm) { struct list_head *list; - unsigned mem_type; + buf->mem_type = 0; - if (buf->flags & DRM_BO_FLAG_MEM_TT) { - mem_type = DRM_BO_MEM_TT; - list = - (buf-> - flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? 
- &bm->pinned[mem_type] : &bm->lru[mem_type]; - list_add_tail(&buf->lru_ttm, list); - } else { - mem_type = DRM_BO_MEM_LOCAL; - list = - (buf-> - flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? - &bm->pinned[mem_type] : &bm->lru[mem_type]; - list_add_tail(&buf->lru_ttm, list); - } - if (buf->flags & DRM_BO_FLAG_MEM_VRAM) { - mem_type = DRM_BO_MEM_VRAM; - list = - (buf-> - flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? - &bm->pinned[mem_type] : &bm->lru[mem_type]; - list_add_tail(&buf->lru_card, list); + switch(buf->flags & DRM_BO_MASK_MEM) { + case DRM_BO_FLAG_MEM_TT: + buf->mem_type = DRM_BO_MEM_TT; + break; + case DRM_BO_FLAG_MEM_VRAM: + buf->mem_type = DRM_BO_MEM_VRAM; + break; + case DRM_BO_FLAG_MEM_LOCAL: + buf->mem_type = DRM_BO_MEM_LOCAL; + break; + default: + BUG_ON(1); } + list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? + &bm->pinned[buf->mem_type] : &bm->lru[buf->mem_type]; + list_add_tail(&buf->lru, list); + return; } /* @@ -141,7 +103,7 @@ static int drm_move_tt_to_local(drm_buff drm_device_t *dev = buf->dev; int ret; - if (buf->node_ttm) { + if (buf->mm_node) { mutex_lock(&dev->struct_mutex); if (evict) ret = drm_evict_ttm(buf->ttm); @@ -156,8 +118,8 @@ static int drm_move_tt_to_local(drm_buff } if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { - drm_mm_put_block(buf->node_ttm); - buf->node_ttm = NULL; + drm_mm_put_block(buf->mm_node); + buf->mm_node = NULL; } mutex_unlock(&dev->struct_mutex); } @@ -206,8 +168,7 @@ static void drm_bo_destroy_locked(drm_de * Take away from lru lists. 
*/ - list_del_init(&bo->lru_ttm); - list_del_init(&bo->lru_card); + list_del_init(&bo->lru); if (bo->ttm) { unsigned long _end = jiffies + DRM_HZ; @@ -232,13 +193,9 @@ static void drm_bo_destroy_locked(drm_de } } - if (bo->node_ttm) { - drm_mm_put_block(bo->node_ttm); - bo->node_ttm = NULL; - } - if (bo->node_card) { - drm_mm_put_block(bo->node_card); - bo->node_card = NULL; + if (bo->mm_node) { + drm_mm_put_block(bo->mm_node); + bo->mm_node = NULL; } if (bo->ttm_object) { drm_ttm_object_deref_locked(dev, bo->ttm_object); @@ -431,7 +388,7 @@ int drm_fence_buffer_objects(drm_file_t if (!list) list = &bm->unfenced; - list_for_each_entry(entry, list, lru_ttm) { + list_for_each_entry(entry, list, lru) { BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); fence_type |= entry->fence_type; if (entry->fence_class != 0) { @@ -477,13 +434,12 @@ int drm_fence_buffer_objects(drm_file_t count = 0; l = f_list.next; while (l != &f_list) { - entry = list_entry(l, drm_buffer_object_t, lru_ttm); + entry = list_entry(l, drm_buffer_object_t, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); mutex_lock(&dev->struct_mutex); list_del_init(l); - list_del_init(&entry->lru_card); if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { count++; if (entry->fence) @@ -542,18 +498,11 @@ static int drm_bo_evict(drm_buffer_objec if (ret) goto out; mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru_ttm); + list_del_init(&bo->lru); drm_bo_add_to_lru(bo, bm); mutex_unlock(&dev->struct_mutex); } -#if 0 - else { - ret = drm_move_vram_to_local(bo); - mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru_card); - mutex_unlock(&dev->struct_mutex); - } -#endif + if (ret) goto out; @@ -589,7 +538,7 @@ int drm_bo_alloc_space(drm_buffer_object if (lru->next == lru) break; - bo = drm_bo_entry(lru->next, mem_type); + bo = list_entry(lru->next, drm_buffer_object_t, lru); atomic_inc(&bo->usage); mutex_unlock(&dev->struct_mutex); @@ -614,11 +563,7 @@ int 
drm_bo_alloc_space(drm_buffer_object BUG_ON(!node); node->private = (void *)buf; - if (mem_type == DRM_BO_MEM_TT) { - buf->node_ttm = node; - } else { - buf->node_card = node; - } + buf->mm_node = node; buf->offset = node->start * PAGE_SIZE; return 0; } @@ -629,21 +574,21 @@ static int drm_move_local_to_tt(drm_buff drm_ttm_backend_t *be; int ret; - if (!(bo->node_ttm && (bo->flags & DRM_BO_FLAG_NO_MOVE))) { - BUG_ON(bo->node_ttm); + if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) { + BUG_ON(bo->mm_node); ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait); if (ret) return ret; } - DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->node_ttm->start); + DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start); mutex_lock(&dev->struct_mutex); ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED, - bo->node_ttm->start); + bo->mm_node->start); if (ret) { - drm_mm_put_block(bo->node_ttm); - bo->node_ttm = NULL; + drm_mm_put_block(bo->mm_node); + bo->mm_node = NULL; } mutex_unlock(&dev->struct_mutex); @@ -860,11 +805,7 @@ static int drm_bo_read_cached(drm_buffer int ret = 0; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); - if (bo->node_card) - ret = drm_bo_evict(bo, DRM_BO_MEM_VRAM, 1, 0); - if (ret) - return ret; - if (bo->node_ttm) + if (bo->mm_node) ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0); return ret; } @@ -1215,13 +1156,9 @@ static int drm_buffer_object_validate(dr if ((flag_diff & DRM_BO_FLAG_NO_MOVE) && !(new_flags & DRM_BO_FLAG_NO_MOVE)) { mutex_lock(&dev->struct_mutex); - if (bo->node_ttm) { - drm_mm_put_block(bo->node_ttm); - bo->node_ttm = NULL; - } - if (bo->node_card) { - drm_mm_put_block(bo->node_card); - bo->node_card = NULL; + if (bo->mm_node) { + drm_mm_put_block(bo->mm_node); + bo->mm_node = NULL; } mutex_unlock(&dev->struct_mutex); } @@ -1248,15 +1185,13 @@ static int drm_buffer_object_validate(dr DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, _DRM_BO_FLAG_UNFENCED); mutex_lock(&dev->struct_mutex); - 
list_del(&bo->lru_ttm); - list_add_tail(&bo->lru_ttm, &bm->unfenced); - list_del_init(&bo->lru_card); + list_del(&bo->lru); + list_add_tail(&bo->lru, &bm->unfenced); mutex_unlock(&dev->struct_mutex); } else { mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru_ttm); - list_del_init(&bo->lru_card); + list_del_init(&bo->lru); drm_bo_add_to_lru(bo, bm); mutex_unlock(&dev->struct_mutex); } @@ -1430,14 +1365,12 @@ int drm_buffer_object_create(drm_file_t atomic_set(&bo->usage, 1); atomic_set(&bo->mapped, -1); DRM_INIT_WAITQUEUE(&bo->event_queue); - INIT_LIST_HEAD(&bo->lru_ttm); - INIT_LIST_HEAD(&bo->lru_card); + INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); bo->dev = dev; bo->type = type; bo->num_pages = num_pages; - bo->node_card = NULL; - bo->node_ttm = NULL; + bo->mm_node = NULL; bo->page_alignment = page_alignment; if (bo->type == drm_bo_type_fake) { bo->offset = buffer_start; @@ -1653,7 +1586,7 @@ static int drm_bo_force_list_clean(drm_d clean = 1; list_for_each_safe(list, next, head) { prev = list->prev; - entry = drm_bo_entry(list, mem_type); + entry = list_entry(list, drm_buffer_object_t, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); @@ -1664,7 +1597,7 @@ static int drm_bo_force_list_clean(drm_d drm_bo_usage_deref_locked(dev, entry); goto retry; } - if (drm_bo_mm_node(entry, mem_type)) { + if (entry->mm_node) { clean = 0; /* |
From: <an...@ke...> - 2007-02-08 07:21:22
|
shared-core/drm.h | 3 +++ shared-core/drm_pciids.txt | 26 +++++++++++++------------- shared-core/i915_dma.c | 2 +- shared-core/i915_drv.h | 7 +++++++ 4 files changed, 24 insertions(+), 14 deletions(-) New commits: diff-tree 898aca1a66d5e685a01944f92d572641b7980c85 (from ef9a9d3cd1fb6f7def03ddea69af3db8502d8eb9) Author: Eric Anholt <er...@an...> Date: Tue Jan 23 08:34:25 2007 +0800 Warning fix: correct type of i915_mmio argument. diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 912fe0b..9624fac 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -798,7 +798,7 @@ static int i915_mmio(DRM_IOCTL_ARGS) DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return DRM_ERR(EINVAL); } - DRM_COPY_FROM_USER_IOCTL(mmio, (drm_i915_setparam_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(mmio, (drm_i915_mmio_t __user *) data, sizeof(mmio)); if (mmio.reg >= mmio_table_size) diff-tree ef9a9d3cd1fb6f7def03ddea69af3db8502d8eb9 (from 8918748058bc1aff64298855cde09512e2128367) Author: Eric Anholt <er...@an...> Date: Tue Jan 23 08:19:43 2007 +0800 Define __iomem for systems without it. diff --git a/shared-core/drm.h b/shared-core/drm.h index 9efb1dc..0e3b9b8 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -61,6 +61,9 @@ #ifndef __user #define __user #endif +#ifndef __iomem +#define __iomem +#endif #ifdef __GNUC__ # define DEPRECATED __attribute__ ((deprecated)) diff-tree 8918748058bc1aff64298855cde09512e2128367 (from 17985f07d68322519919a7f629a6d2d9bf3916ed) Author: Eric Anholt <er...@an...> Date: Tue Jan 23 08:05:36 2007 +0800 Add chip family flags to i915 driver, and fix a missing '"' in mach64 ID list. 
diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index 0d2639e..0fc9775 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -186,7 +186,7 @@ 0x1002 0x4c51 0 "3D Rage LT Pro" 0x1002 0x4c42 0 "3D Rage LT Pro AGP-133" 0x1002 0x4c44 0 "3D Rage LT Pro AGP-66" -0x1002 0x4759 0 "Rage 3D IICATI 3D RAGE IIC AGP(A12/A13) +0x1002 0x4759 0 "Rage 3D IICATI 3D RAGE IIC AGP(A12/A13)" 0x1002 0x474c 0 "Rage XC" 0x1002 0x474f 0 "Rage XL" 0x1002 0x4752 0 "Rage XL" @@ -266,18 +266,18 @@ [ffb] [i915] -0x8086 0x3577 0 "Intel i830M GMCH" -0x8086 0x2562 0 "Intel i845G GMCH" -0x8086 0x3582 0 "Intel i852GM/i855GM GMCH" -0x8086 0x2572 0 "Intel i865G GMCH" -0x8086 0x2582 0 "Intel i915G" -0x8086 0x2592 0 "Intel i915GM" -0x8086 0x2772 0 "Intel i945G" -0x8086 0x27A2 0 "Intel i945GM" -0x8086 0x2972 0 "Intel i946GZ" -0x8086 0x2982 0 "Intel i965G" -0x8086 0x2992 0 "Intel i965Q" -0x8086 0x29A2 0 "Intel i965G" +0x8086 0x3577 CHIP_I8XX "Intel i830M GMCH" +0x8086 0x2562 CHIP_I8XX "Intel i845G GMCH" +0x8086 0x3582 CHIP_I8XX "Intel i852GM/i855GM GMCH" +0x8086 0x2572 CHIP_I8XX "Intel i865G GMCH" +0x8086 0x2582 CHIP_I9XX|CHIP_I915 "Intel i915G" +0x8086 0x2592 CHIP_I9XX|CHIP_I915 "Intel i915GM" +0x8086 0x2772 CHIP_I9XX|CHIP_I915 "Intel i945G" +0x8086 0x27A2 CHIP_I9XX|CHIP_I915 "Intel i945GM" +0x8086 0x2972 CHIP_I9XX|CHIP_I965 "Intel i946GZ" +0x8086 0x2982 CHIP_I9XX|CHIP_I965 "Intel i965G" +0x8086 0x2992 CHIP_I9XX|CHIP_I965 "Intel i965Q" +0x8086 0x29A2 CHIP_I9XX|CHIP_I965 "Intel i965G" [imagine] 0x105d 0x2309 IMAGINE_128 "Imagine 128" diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index ef9f363..5a76cb1 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -132,6 +132,13 @@ typedef struct drm_i915_private { unsigned int swaps_pending; } drm_i915_private_t; +enum intel_chip_family { + CHIP_I8XX = 0x01, + CHIP_I9XX = 0x02, + CHIP_I915 = 0x04, + CHIP_I965 = 0x08, +}; + extern drm_ioctl_desc_t i915_ioctls[]; extern int i915_max_ioctl; |
From: <dar...@ke...> - 2007-03-13 03:56:29
|
shared-core/nouveau_drv.h | 8 +- shared-core/nouveau_fifo.c | 56 ++++++++++------ shared-core/nouveau_object.c | 148 ++++++++++++++++++++----------------------- shared-core/nouveau_state.c | 1 shared-core/nv40_graph.c | 1 5 files changed, 113 insertions(+), 101 deletions(-) New commits: diff-tree 90f8c691a57a79a6a9652b7d2a01c59acc127b3f (from 1775202cf96c51018bf369b1b4d08023d622513c) Author: Ben Skeggs <sk...@gm...> Date: Tue Mar 13 14:51:55 2007 +1100 nouveau: make sure cmdbuf object gets destroyed diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index a4b0589..c27c93e 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -191,7 +191,7 @@ extern int nouveau_fifo_id_get(drm_devi extern void nouveau_fifo_free(drm_device_t *dev, int channel); /* nouveau_object.c */ -extern void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp); +extern void nouveau_object_cleanup(drm_device_t *dev, int channel); extern struct nouveau_object * nouveau_object_gr_create(drm_device_t *dev, int channel, int class); extern struct nouveau_object * diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 8c4e830..3ffb051 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -527,15 +527,20 @@ static int nouveau_fifo_alloc(drm_device if (i==nouveau_fifo_number(dev)) return DRM_ERR(EINVAL); + /* that fifo is used */ + dev_priv->fifos[i].used = 1; + dev_priv->fifos[i].filp = filp; + /* FIFO has no objects yet */ + dev_priv->fifos[i].objs = NULL; + /* allocate a command buffer, and create a dma object for the gpu */ ret = nouveau_fifo_cmdbuf_alloc(dev, i); - if (ret) return ret; + if (ret) { + nouveau_fifo_free(dev, i); + return ret; + } cb_obj = dev_priv->fifos[i].cmdbuf_obj; - /* that fifo is used */ - dev_priv->fifos[i].used=1; - dev_priv->fifos[i].filp=filp; - init->channel = i; init->put_base = 0; dev_priv->cur_fifo = init->channel; @@ -638,8 +643,6 @@ static int nouveau_fifo_alloc(drm_device init->cmdbuf = 
dev_priv->fifos[init->channel].cmdbuf_mem->start; init->cmdbuf_size = dev_priv->fifos[init->channel].cmdbuf_mem->size; - /* FIFO has no objects yet */ - dev_priv->fifos[init->channel].objs = NULL; dev_priv->fifo_alloc_count++; DRM_INFO("%s: initialised FIFO %d\n", __func__, init->channel); @@ -647,43 +650,51 @@ static int nouveau_fifo_alloc(drm_device } /* stops a fifo */ -void nouveau_fifo_free(drm_device_t* dev,int n) +void nouveau_fifo_free(drm_device_t* dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; int i; int ctx_size = nouveau_fifo_ctx_size(dev); - dev_priv->fifos[n].used=0; - DRM_INFO("%s: freeing fifo %d\n", __func__, n); + chan->used = 0; + DRM_INFO("%s: freeing fifo %d\n", __func__, channel); /* disable the fifo caches */ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); - NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)&~(1<<n)); + NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel)); // FIXME XXX needs more code /* Clean RAMFC */ for (i=0;i<ctx_size;i+=4) { DRM_DEBUG("RAMFC +%02x: 0x%08x\n", i, NV_READ(NV_RAMIN + - dev_priv->ramfc_offset + n*ctx_size + i)); - NV_WRITE(NV_RAMIN + dev_priv->ramfc_offset + n*ctx_size + i, 0); + dev_priv->ramfc_offset + + channel*ctx_size + i)); + NV_WRITE(NV_RAMIN + dev_priv->ramfc_offset + + channel*ctx_size + i, 0); } + /* Cleanup PGRAPH state */ if (dev_priv->card_type >= NV_40) - nouveau_instmem_free(dev, dev_priv->fifos[n].ramin_grctx); + nouveau_instmem_free(dev, chan->ramin_grctx); else if (dev_priv->card_type >= NV_30) { } else if (dev_priv->card_type >= NV_20) { /* clear ctx table */ - INSTANCE_WR(dev_priv->ctx_table, n, 0); - nouveau_instmem_free(dev, dev_priv->fifos[n].ramin_grctx); + INSTANCE_WR(dev_priv->ctx_table, channel, 0); + nouveau_instmem_free(dev, chan->ramin_grctx); } /* reenable the fifo caches */ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); - /* Deallocate command buffer, and dma object */ - nouveau_mem_free(dev, 
dev_priv->fifos[n].cmdbuf_mem); + /* Deallocate command buffer */ + if (chan->cmdbuf_mem) + nouveau_mem_free(dev, chan->cmdbuf_mem); + + /* Destroy objects belonging to the channel */ + nouveau_object_cleanup(dev, channel); dev_priv->fifo_alloc_count--; } diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index c5cf849..83f039d 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -461,17 +461,13 @@ nouveau_object_free(drm_device_t *dev, s drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); } -void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp) +void nouveau_object_cleanup(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv=dev->dev_private; - int channel; - channel = nouveau_fifo_id_get(dev, filp); - if (channel == -1) - return; - - while (dev_priv->fifos[channel].objs) + while (dev_priv->fifos[channel].objs) { nouveau_object_free(dev, dev_priv->fifos[channel].objs); + } } int nouveau_ioctl_object_init(DRM_IOCTL_ARGS) diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index e1fc633..ed45c16 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -34,7 +34,6 @@ void nouveau_preclose(drm_device_t * dev nouveau_mem_release(filp,dev_priv->fb_heap); nouveau_mem_release(filp,dev_priv->agp_heap); - nouveau_object_cleanup(dev, filp); nouveau_fifo_cleanup(dev, filp); } diff-tree 1775202cf96c51018bf369b1b4d08023d622513c (from 7e2bbe295424adfcd455a4c4b42dd0342087615e) Author: Ben Skeggs <sk...@gm...> Date: Tue Mar 13 14:18:03 2007 +1100 nouveau: associate all created objects with a channel + cleanups diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index c3d19bb..a4b0589 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -61,6 +61,7 @@ struct nouveau_object { struct nouveau_object *next; struct nouveau_object *prev; + int channel; struct mem_block *instance; uint32_t ht_loc; @@ -192,9 +193,12 @@ extern void 
nouveau_fifo_free(drm_device /* nouveau_object.c */ extern void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp); extern struct nouveau_object * -nouveau_dma_object_create(drm_device_t *dev, int class, +nouveau_object_gr_create(drm_device_t *dev, int channel, int class); +extern struct nouveau_object * +nouveau_object_dma_create(drm_device_t *dev, int channel, int class, uint32_t offset, uint32_t size, int access, int target); +extern void nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj); extern int nouveau_ioctl_object_init(DRM_IOCTL_ARGS); extern int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS); extern uint32_t nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index ebdf7fb..8c4e830 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -239,12 +239,14 @@ nouveau_fifo_cmdbuf_alloc(struct drm_dev } if (cb->flags & NOUVEAU_MEM_AGP) { - cb_dma = nouveau_dma_object_create(dev, NV_CLASS_DMA_IN_MEMORY, + cb_dma = nouveau_object_dma_create(dev, channel, + NV_CLASS_DMA_IN_MEMORY, cb->start - dev_priv->agp_phys, cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP); } else if (dev_priv->card_type != NV_04) { - cb_dma = nouveau_dma_object_create(dev, NV_CLASS_DMA_IN_MEMORY, + cb_dma = nouveau_object_dma_create(dev, channel, + NV_CLASS_DMA_IN_MEMORY, cb->start - drm_get_resource_start(dev, 1), cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM); @@ -253,7 +255,8 @@ nouveau_fifo_cmdbuf_alloc(struct drm_dev * exact reason for existing :) PCI access to cmdbuf in * VRAM. 
*/ - cb_dma = nouveau_dma_object_create(dev, NV_CLASS_DMA_IN_MEMORY, + cb_dma = nouveau_object_dma_create(dev, channel, + NV_CLASS_DMA_IN_MEMORY, cb->start, cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI); } diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index ed214c6..c5cf849 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -54,10 +54,10 @@ nouveau_chip_instance_get(drm_device_t * } static void -nouveau_object_link(drm_device_t *dev, int channel, struct nouveau_object *obj) +nouveau_object_link(drm_device_t *dev, struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = &dev_priv->fifos[obj->channel]; if (!chan->objs) { chan->objs = obj; @@ -72,11 +72,10 @@ nouveau_object_link(drm_device_t *dev, i } static void -nouveau_object_unlink(drm_device_t *dev, int channel, - struct nouveau_object *obj) +nouveau_object_unlink(drm_device_t *dev, struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = &dev_priv->fifos[obj->channel]; if (obj->prev == NULL) { if (obj->next) @@ -155,7 +154,7 @@ nouveau_ht_handle_hash(drm_device_t *dev } static int -nouveau_ht_object_insert(drm_device_t* dev, int channel, +nouveau_ht_object_insert(drm_device_t* dev, int channel, uint32_t handle, struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv=dev->dev_private; @@ -163,6 +162,7 @@ nouveau_ht_object_insert(drm_device_t* d int ht_end = ht_base + dev_priv->ramht_size; int o_ofs, ofs; + obj->handle = handle; o_ofs = ofs = nouveau_ht_handle_hash(dev, channel, obj->handle); while (NV_READ(ht_base + ofs) || NV_READ(ht_base + ofs + 4)) { @@ -212,7 +212,8 @@ static void nouveau_hash_table_remove(dr } } -static struct nouveau_object *nouveau_instance_alloc(drm_device_t* dev) +static struct nouveau_object * 
+nouveau_object_instance_alloc(drm_device_t* dev, int channel) { drm_nouveau_private_t *dev_priv=dev->dev_private; struct nouveau_object *obj; @@ -223,6 +224,8 @@ static struct nouveau_object *nouveau_in DRM_ERROR("couldn't alloc memory for object\n"); return NULL; } + + /* Allocate instance memory */ obj->instance = nouveau_instmem_alloc(dev, (dev_priv->card_type >= NV_40 ? 32 : 16), 4); if (!obj->instance) { @@ -231,25 +234,28 @@ static struct nouveau_object *nouveau_in return NULL; } + /* Bind object to channel */ + obj->channel = channel; + obj->handle = ~0; + nouveau_object_link(dev, obj); + return obj; } -static void nouveau_object_instance_free(drm_device_t *dev, - struct nouveau_object *obj) +static void +nouveau_object_instance_free(drm_device_t *dev, struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv=dev->dev_private; - int count, i; + int i; - if (dev_priv->card_type >= NV_40) - count = 8; - else - count = 4; + /* Unbind object from channel */ + nouveau_object_unlink(dev, obj); /* Clean RAMIN entry */ DRM_DEBUG("Instance entry for 0x%08x" "(engine %d, class 0x%x) before destroy:\n", obj->handle, obj->engine, obj->class); - for (i=0;i<count;i++) { + for (i=0; i<(obj->instance->size/4); i++) { DRM_DEBUG(" +0x%02x: 0x%08x\n", (i*4), INSTANCE_RD(obj->instance, i)); INSTANCE_WR(obj->instance, i, 0x00000000); @@ -285,7 +291,7 @@ static void nouveau_object_instance_free */ struct nouveau_object * -nouveau_dma_object_create(drm_device_t* dev, int class, +nouveau_object_dma_create(drm_device_t* dev, int channel, int class, uint32_t offset, uint32_t size, int access, int target) { @@ -320,7 +326,7 @@ nouveau_dma_object_create(drm_device_t* frame = offset & ~0x00000FFF; adjust = offset & 0x00000FFF; - obj = nouveau_instance_alloc(dev); + obj = nouveau_object_instance_alloc(dev, channel); if (!obj) { DRM_ERROR("couldn't allocate DMA object\n"); return obj; @@ -393,15 +399,15 @@ nouveau_dma_object_create(drm_device_t* entry[5]: set to 0? 
*/ -static struct nouveau_object * -nouveau_context_object_create(drm_device_t* dev, int class) +struct nouveau_object * +nouveau_object_gr_create(drm_device_t* dev, int channel, int class) { drm_nouveau_private_t *dev_priv=dev->dev_private; struct nouveau_object *obj; DRM_DEBUG("class=%x\n", class); - obj = nouveau_instance_alloc(dev); + obj = nouveau_object_instance_alloc(dev, channel); if (!obj) { DRM_ERROR("couldn't allocate context object\n"); return obj; @@ -446,16 +452,13 @@ nouveau_context_object_create(drm_device return obj; } -static void -nouveau_object_free(drm_device_t *dev, int channel, struct nouveau_object *obj) +void +nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj) { - nouveau_object_unlink(dev, channel, obj); - nouveau_object_instance_free(dev, obj); - nouveau_hash_table_remove(dev, obj); - + if (obj->handle != ~0) + nouveau_hash_table_remove(dev, obj); drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); - return; } void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp) @@ -468,8 +471,7 @@ void nouveau_object_cleanup(drm_device_t return; while (dev_priv->fifos[channel].objs) - nouveau_object_free(dev, channel, - dev_priv->fifos[channel].objs); + nouveau_object_free(dev, dev_priv->fifos[channel].objs); } int nouveau_ioctl_object_init(DRM_IOCTL_ARGS) @@ -494,18 +496,15 @@ int nouveau_ioctl_object_init(DRM_IOCTL_ return DRM_ERR(EINVAL); } - obj = nouveau_context_object_create(dev, init.class); + obj = nouveau_object_gr_create(dev, channel, init.class); if (!obj) return DRM_ERR(ENOMEM); - obj->handle = init.handle; - if (nouveau_ht_object_insert(dev, channel, obj)) { - nouveau_object_free(dev, channel, obj); + if (nouveau_ht_object_insert(dev, channel, init.handle, obj)) { + nouveau_object_free(dev, obj); return DRM_ERR(ENOMEM); } - nouveau_object_link(dev, channel, obj); - return 0; } @@ -590,20 +589,18 @@ int nouveau_ioctl_dma_object_init(DRM_IO return DRM_ERR(EINVAL); } - obj = nouveau_dma_object_create(dev, 
init.class, + obj = nouveau_object_dma_create(dev, channel, init.class, init.offset, init.size, init.access, init.target); if (!obj) return DRM_ERR(ENOMEM); obj->handle = init.handle; - if (nouveau_ht_object_insert(dev, channel, obj)) { - nouveau_object_free(dev, channel, obj); + if (nouveau_ht_object_insert(dev, channel, init.handle, obj)) { + nouveau_object_free(dev, obj); return DRM_ERR(ENOMEM); } - nouveau_object_link(dev, channel, obj); - return 0; } diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 5e0d8d7..f2650ca 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -894,7 +894,6 @@ nv40_graph_init(drm_device_t *dev) drm_nouveau_private_t *dev_priv = (drm_nouveau_private_t *)dev->dev_private; uint32_t *ctx_voodoo; - uint32_t pg0220_inst; int i; switch (dev_priv->chipset) { diff-tree 7e2bbe295424adfcd455a4c4b42dd0342087615e (from 462a6ea4caadae0c68f6fe3e0343950ced2095cb) Author: Ben Skeggs <sk...@gm...> Date: Tue Mar 13 13:43:14 2007 +1100 nouveau: s/fifo/channel/ diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index b3c4b0e..ed214c6 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -44,8 +44,8 @@ * in the future when we can access more instance ram which isn't mapped into * the PRAMIN aperture */ -uint32_t nouveau_chip_instance_get(drm_device_t *dev, - struct mem_block *mem) +uint32_t +nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem) { uint32_t inst = (uint32_t)mem->start >> 4; DRM_DEBUG("****** on-chip instance for 0x%016llx = 0x%08x\n", @@ -53,34 +53,35 @@ uint32_t nouveau_chip_instance_get(drm_d return inst; } -static void nouveau_object_link(drm_device_t *dev, int fifo_num, - struct nouveau_object *obj) +static void +nouveau_object_link(drm_device_t *dev, int channel, struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num]; + struct nouveau_fifo *chan = 
&dev_priv->fifos[channel]; - if (!fifo->objs) { - fifo->objs = obj; + if (!chan->objs) { + chan->objs = obj; return; } obj->prev = NULL; - obj->next = fifo->objs; + obj->next = chan->objs; - fifo->objs->prev = obj; - fifo->objs = obj; + chan->objs->prev = obj; + chan->objs = obj; } -static void nouveau_object_unlink(drm_device_t *dev, int fifo_num, - struct nouveau_object *obj) +static void +nouveau_object_unlink(drm_device_t *dev, int channel, + struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num]; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; if (obj->prev == NULL) { if (obj->next) obj->next->prev = NULL; - fifo->objs = obj->next; + chan->objs = obj->next; } else if (obj->next == NULL) { if (obj->prev) obj->prev->next = NULL; @@ -91,11 +92,11 @@ static void nouveau_object_unlink(drm_de } static struct nouveau_object * -nouveau_object_handle_find(drm_device_t *dev, int fifo_num, uint32_t handle) +nouveau_object_handle_find(drm_device_t *dev, int channel, uint32_t handle) { drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num]; - struct nouveau_object *obj = fifo->objs; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_object *obj = chan->objs; DRM_DEBUG("Looking for handle 0x%08x\n", handle); while (obj) { @@ -138,8 +139,8 @@ nouveau_object_handle_find(drm_device_t The key into the hash table depends on the object handle and channel id and is given as: */ -static uint32_t nouveau_handle_hash(drm_device_t* dev, uint32_t handle, - int fifo) +static uint32_t +nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle) { drm_nouveau_private_t *dev_priv=dev->dev_private; uint32_t hash = 0; @@ -149,19 +150,20 @@ static uint32_t nouveau_handle_hash(drm_ hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); handle >>= dev_priv->ramht_bits; } - hash ^= fifo << (dev_priv->ramht_bits - 4); + 
hash ^= channel << (dev_priv->ramht_bits - 4); return hash << 3; } -static int nouveau_hash_table_insert(drm_device_t* dev, int fifo, - struct nouveau_object *obj) +static int +nouveau_ht_object_insert(drm_device_t* dev, int channel, + struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv=dev->dev_private; int ht_base = NV_RAMIN + dev_priv->ramht_offset; int ht_end = ht_base + dev_priv->ramht_size; int o_ofs, ofs; - o_ofs = ofs = nouveau_handle_hash(dev, obj->handle, fifo); + o_ofs = ofs = nouveau_ht_handle_hash(dev, channel, obj->handle); while (NV_READ(ht_base + ofs) || NV_READ(ht_base + ofs + 4)) { ofs += 8; @@ -174,19 +176,19 @@ static int nouveau_hash_table_insert(drm ofs += ht_base; DRM_DEBUG("Channel %d - Handle 0x%08x at 0x%08x\n", - fifo, obj->handle, ofs); + channel, obj->handle, ofs); NV_WRITE(NV_RAMHT_HANDLE_OFFSET + ofs, obj->handle); if (dev_priv->card_type >= NV_40) NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, - (fifo << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT) | nouveau_chip_instance_get(dev, obj->instance) ); else NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, NV_RAMHT_CONTEXT_VALID | - (fifo << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT) | nouveau_chip_instance_get(dev, obj->instance) ); @@ -445,9 +447,9 @@ nouveau_context_object_create(drm_device } static void -nouveau_object_free(drm_device_t *dev, int fifo_num, struct nouveau_object *obj) +nouveau_object_free(drm_device_t *dev, int channel, struct nouveau_object *obj) { - nouveau_object_unlink(dev, fifo_num, obj); + nouveau_object_unlink(dev, channel, obj); nouveau_object_instance_free(dev, obj); nouveau_hash_table_remove(dev, obj); @@ -459,14 +461,15 @@ nouveau_object_free(drm_device_t *dev, i void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp) { drm_nouveau_private_t *dev_priv=dev->dev_private; - int fifo; + int 
channel; - fifo = nouveau_fifo_id_get(dev, filp); - if (fifo == -1) + channel = nouveau_fifo_id_get(dev, filp); + if (channel == -1) return; - while (dev_priv->fifos[fifo].objs) - nouveau_object_free(dev, fifo, dev_priv->fifos[fifo].objs); + while (dev_priv->fifos[channel].objs) + nouveau_object_free(dev, channel, + dev_priv->fifos[channel].objs); } int nouveau_ioctl_object_init(DRM_IOCTL_ARGS) @@ -474,10 +477,10 @@ int nouveau_ioctl_object_init(DRM_IOCTL_ DRM_DEVICE; drm_nouveau_object_init_t init; struct nouveau_object *obj; - int fifo; + int channel; - fifo = nouveau_fifo_id_get(dev, filp); - if (fifo == -1) + channel = nouveau_fifo_id_get(dev, filp); + if (channel == -1) return DRM_ERR(EINVAL); DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_object_init_t __user *) @@ -485,9 +488,9 @@ int nouveau_ioctl_object_init(DRM_IOCTL_ //FIXME: check args, only allow trusted objects to be created - if (nouveau_object_handle_find(dev, fifo, init.handle)) { + if (nouveau_object_handle_find(dev, channel, init.handle)) { DRM_ERROR("Channel %d: handle 0x%08x already exists\n", - fifo, init.handle); + channel, init.handle); return DRM_ERR(EINVAL); } @@ -496,12 +499,12 @@ int nouveau_ioctl_object_init(DRM_IOCTL_ return DRM_ERR(ENOMEM); obj->handle = init.handle; - if (nouveau_hash_table_insert(dev, fifo, obj)) { - nouveau_object_free(dev, fifo, obj); + if (nouveau_ht_object_insert(dev, channel, obj)) { + nouveau_object_free(dev, channel, obj); return DRM_ERR(ENOMEM); } - nouveau_object_link(dev, fifo, obj); + nouveau_object_link(dev, channel, obj); return 0; } @@ -569,10 +572,10 @@ int nouveau_ioctl_dma_object_init(DRM_IO DRM_DEVICE; drm_nouveau_dma_object_init_t init; struct nouveau_object *obj; - int fifo; + int channel; - fifo = nouveau_fifo_id_get(dev, filp); - if (fifo == -1) + channel = nouveau_fifo_id_get(dev, filp); + if (channel == -1) return DRM_ERR(EINVAL); DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_dma_object_init_t __user *) @@ -581,9 +584,9 @@ int 
nouveau_ioctl_dma_object_init(DRM_IO if (nouveau_dma_object_check_access(dev, &init)) return DRM_ERR(EPERM); - if (nouveau_object_handle_find(dev, fifo, init.handle)) { + if (nouveau_object_handle_find(dev, channel, init.handle)) { DRM_ERROR("Channel %d: handle 0x%08x already exists\n", - fifo, init.handle); + channel, init.handle); return DRM_ERR(EINVAL); } @@ -594,12 +597,12 @@ int nouveau_ioctl_dma_object_init(DRM_IO return DRM_ERR(ENOMEM); obj->handle = init.handle; - if (nouveau_hash_table_insert(dev, fifo, obj)) { - nouveau_object_free(dev, fifo, obj); + if (nouveau_ht_object_insert(dev, channel, obj)) { + nouveau_object_free(dev, channel, obj); return DRM_ERR(ENOMEM); } - nouveau_object_link(dev, fifo, obj); + nouveau_object_link(dev, channel, obj); return 0; } |
From: <ahu...@ke...> - 2007-07-13 14:13:37
|
shared-core/nouveau_object.c | 2 +- 1 files changed, 1 insertion(+), 1 deletion(-) New commits: diff-tree 00a5ab760b1d65ceea95e703d8ce8ecf8b63fbb3 (from parents) Merge: 5ae3ad4f015aa072180a0c55255832be4e7557cf 3007b03bdf608708a50b842d4291d3640c30f2c5 Author: Arthur Huillet <art...@fr...> Date: Fri Jul 13 16:03:25 2007 +0200 Merge commit 'public/master' diff-tree 5ae3ad4f015aa072180a0c55255832be4e7557cf (from 0029713451af6f5f216079775ff77cae9b423c0e) Author: Arthur Huillet <art...@fr...> Date: Fri Jul 13 15:57:17 2007 +0200 now attempting to create PCI object only when there is a pci_heap diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index c5697d8..aab2e3a 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -918,7 +918,7 @@ nouveau_gpuobj_channel_init(struct drm_d return ret; } } - else { + else if ( dev_priv->pci_heap) { if (dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/ /*PCI*/ diff-tree 3007b03bdf608708a50b842d4291d3640c30f2c5 (from 851c950d988e5a47fa6add71427e5ef8d4dcf231) Author: Arthur Huillet <art...@fr...> Date: Fri Jul 13 15:57:17 2007 +0200 now attempting to create PCI object only when there is a pci_heap diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 16b38e9..de1f0ca 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -918,7 +918,7 @@ nouveau_gpuobj_channel_init(drm_device_t return ret; } } - else { + else if ( dev_priv->pci_heap) { if (dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/ /*PCI*/ |
From: <dar...@ke...> - 2007-08-10 04:27:59
|
shared-core/nouveau_drv.h | 5 shared-core/nouveau_fifo.c | 2 shared-core/nouveau_irq.c | 87 +++------------ shared-core/nouveau_object.c | 76 ++++++++++--- shared-core/nouveau_reg.h | 10 + shared-core/nouveau_state.c | 5 shared-core/nv04_fifo.c | 2 shared-core/nv10_fifo.c | 2 shared-core/nv40_fifo.c | 2 shared-core/nv50_fifo.c | 37 ++---- shared-core/nv50_instmem.c | 246 ++++++++++++++++++++++++++----------------- 11 files changed, 267 insertions(+), 207 deletions(-) New commits: diff-tree a46104674f129e873b8dfa29cf8aac9c67bd77be (from 39907f613b6c84499c34c9a6ece5f5dde64788c0) Author: Ben Skeggs <sk...@gm...> Date: Fri Aug 10 13:54:26 2007 +1000 nouveau/nv50: demagic instmem setup. diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 4d5c7f7..e3d0ff4 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -393,7 +393,8 @@ extern int nouveau_gpuobj_new_ref(struct struct nouveau_channel *ref_chan, uint32_t handle, int size, int align, uint32_t flags, struct nouveau_gpuobj_ref **); -extern int nouveau_gpuobj_new_fake(struct drm_device *, uint32_t offset, +extern int nouveau_gpuobj_new_fake(struct drm_device *, + uint32_t p_offset, uint32_t b_offset, uint32_t size, uint32_t flags, struct nouveau_gpuobj **, struct nouveau_gpuobj_ref**); diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index 22bced1..f0c2a55 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -285,8 +285,6 @@ nouveau_fifo_alloc(struct drm_device *de * When there are no more contexts, you lost */ for(channel=0; channel<nouveau_fifo_number(dev); channel++) { - if ((dev_priv->card_type == NV_50) && (channel == 0)) - continue; if (dev_priv->fifos[channel] == NULL) break; } diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index d4142e4..e0cb334 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -72,6 +72,8 @@ nouveau_ramht_hash_handle(struct drm_dev uint32_t hash = 0; int i; + 
DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle); + for (i=32;i>0;i-=dev_priv->ramht_bits) { hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); handle >>= dev_priv->ramht_bits; @@ -80,7 +82,7 @@ nouveau_ramht_hash_handle(struct drm_dev hash ^= channel << (dev_priv->ramht_bits - 4); hash <<= 3; - DRM_DEBUG("ch%d handle=0x%08x hash=0x%08x\n", channel, handle, hash); + DRM_DEBUG("hash=0x%08x\n", hash); return hash; } @@ -286,7 +288,7 @@ nouveau_gpuobj_init(struct drm_device *d if (dev_priv->card_type < NV_50) { if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, - dev_priv->ramht_size, + ~0, dev_priv->ramht_size, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS, &dev_priv->ramht, NULL))) @@ -346,7 +348,13 @@ nouveau_gpuobj_del(struct drm_device *de if (gpuobj->dtor) gpuobj->dtor(dev, gpuobj); - engine->instmem.clear(dev, gpuobj); + if (gpuobj->im_backing) { + if (gpuobj->flags & NVOBJ_FLAG_FAKE) + drm_free(gpuobj->im_backing, + sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER); + else + engine->instmem.clear(dev, gpuobj); + } if (gpuobj->im_pramin) { if (gpuobj->flags & NVOBJ_FLAG_FAKE) @@ -525,7 +533,8 @@ nouveau_gpuobj_ref_find(struct nouveau_c } int -nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size, +nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, + uint32_t b_offset, uint32_t size, uint32_t flags, struct nouveau_gpuobj **pgpuobj, struct nouveau_gpuobj_ref **pref) { @@ -533,8 +542,8 @@ nouveau_gpuobj_new_fake(struct drm_devic struct nouveau_gpuobj *gpuobj = NULL; int i; - DRM_DEBUG("offset=0x%08x size=0x%08x flags=0x%08x\n", - offset, size, flags); + DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n", + p_offset, b_offset, size, flags); gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); if (!gpuobj) @@ -545,14 +554,27 @@ nouveau_gpuobj_new_fake(struct drm_devic list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); - gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block), - 
DRM_MEM_DRIVER); - if (!gpuobj->im_pramin) { - nouveau_gpuobj_del(dev, &gpuobj); - return -ENOMEM; + if (p_offset != ~0) { + gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block), + DRM_MEM_DRIVER); + if (!gpuobj->im_pramin) { + nouveau_gpuobj_del(dev, &gpuobj); + return -ENOMEM; + } + gpuobj->im_pramin->start = p_offset; + gpuobj->im_pramin->size = size; + } + + if (b_offset != ~0) { + gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block), + DRM_MEM_DRIVER); + if (!gpuobj->im_backing) { + nouveau_gpuobj_del(dev, &gpuobj); + return -ENOMEM; + } + gpuobj->im_backing->start = b_offset; + gpuobj->im_backing->size = size; } - gpuobj->im_pramin->start = offset; - gpuobj->im_pramin->size = size; if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { for (i = 0; i < gpuobj->im_pramin->size; i += 4) @@ -962,7 +984,7 @@ nouveau_gpuobj_channel_init(struct nouve vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; vm_offset += chan->ramin->gpuobj->im_pramin->start; - if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, 0x4000, + if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, 0, &chan->vm_pd, NULL))) return ret; for (i=0; i<0x4000; i+=8) { diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 47d54b2..6561462 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -80,6 +80,16 @@ #define NV40_PMC_1708 0x00001708 #define NV40_PMC_170C 0x0000170C +/* probably PMC ? 
*/ +#define NV50_PUNK_BAR0_PRAMIN 0x00001700 +#define NV50_PUNK_BAR_CFG_BASE 0x00001704 +#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30) +#define NV50_PUNK_BAR1_CTXDMA 0x00001708 +#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31) +#define NV50_PUNK_BAR3_CTXDMA 0x0000170C +#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31) +#define NV50_PUNK_UNK1710 0x00001710 + #define NV04_PTIMER_INTR_0 0x00009100 #define NV04_PTIMER_INTR_EN_0 0x00009140 #define NV04_PTIMER_NUMERATOR 0x00009200 diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c index 4d61f4f..d750ced 100644 --- a/shared-core/nv04_fifo.c +++ b/shared-core/nv04_fifo.c @@ -42,7 +42,7 @@ nv04_fifo_create_context(struct nouveau_ struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; - if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), + if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, NV04_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c index a056460..c86725d 100644 --- a/shared-core/nv10_fifo.c +++ b/shared-core/nv10_fifo.c @@ -43,7 +43,7 @@ nv10_fifo_create_context(struct nouveau_ struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; - if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), + if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0, NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index f04c288..eb160ee 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -43,7 +43,7 @@ nv40_fifo_create_context(struct nouveau_ struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; - if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), + if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index f915d33..71b89d6 100644 --- 
a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -30,7 +30,6 @@ typedef struct { struct nouveau_gpuobj_ref *thingo; - struct nouveau_gpuobj_ref *dummyctx; } nv50_fifo_priv; #define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) @@ -47,7 +46,7 @@ nv50_fifo_init_thingo(struct drm_device INSTANCE_WR(thingo->gpuobj, 0, 0x7e); INSTANCE_WR(thingo->gpuobj, 1, 0x7e); - for (i = 0; i <NV_MAX_FIFO_NUMBER; i++, fi) { + for (i = 1; i < 127; i++, fi) { if (dev_priv->fifos[i]) { INSTANCE_WR(thingo->gpuobj, fi, i); fi++; @@ -60,7 +59,7 @@ nv50_fifo_init_thingo(struct drm_device } static int -nv50_fifo_channel_enable(struct drm_device *dev, int channel) +nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->fifos[channel]; @@ -83,7 +82,7 @@ nv50_fifo_channel_enable(struct drm_devi NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); } - nv50_fifo_init_thingo(dev); + if (!nt) nv50_fifo_init_thingo(dev); return 0; } @@ -156,18 +155,9 @@ static int nv50_fifo_init_regs(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; - int ret; DRM_DEBUG("\n"); - if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 0x1000, - 0x1000, - NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, - &priv->dummyctx))) - return ret; - NV_WRITE(0x2500, 0); NV_WRITE(0x3250, 0); NV_WRITE(0x3220, 0); @@ -175,13 +165,9 @@ nv50_fifo_init_regs(struct drm_device *d NV_WRITE(0x3210, 0); NV_WRITE(0x3270, 0); - if (IS_G80) { - NV_WRITE(0x2600, (priv->dummyctx->instance>>8) | (1<<31)); - NV_WRITE(0x27fc, (priv->dummyctx->instance>>8) | (1<<31)); - } else { - NV_WRITE(0x2600, (priv->dummyctx->instance>>12) | (1<<31)); - NV_WRITE(0x27fc, (priv->dummyctx->instance>>12) | (1<<31)); - } + /* Enable dummy channels setup by nv50_instmem.c */ + nv50_fifo_channel_enable(dev, 0, 1); + nv50_fifo_channel_enable(dev, 127, 1); return 0; } @@ -209,6 
+195,7 @@ nv50_fifo_init(struct drm_device *dev) DRM_ERROR("error creating thingo: %d\n", ret); return ret; } + nv50_fifo_init_context_table(dev); nv50_fifo_init_regs__nv(dev); @@ -230,7 +217,6 @@ nv50_fifo_takedown(struct drm_device *de return; nouveau_gpuobj_ref_del(dev, &priv->thingo); - nouveau_gpuobj_ref_del(dev, &priv->dummyctx); dev_priv->Engine.fifo.priv = NULL; drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); @@ -248,7 +234,7 @@ nv50_fifo_create_context(struct nouveau_ if (IS_G80) { uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start; - if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, 0x100, + if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, ~0, 0x100, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &ramfc, &chan->ramfc))) @@ -285,7 +271,7 @@ nv50_fifo_create_context(struct nouveau_ INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12); } - if ((ret = nv50_fifo_channel_enable(dev, chan->id))) { + if ((ret = nv50_fifo_channel_enable(dev, chan->id, 0))) { DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret); nouveau_gpuobj_ref_del(dev, &chan->ramfc); return ret; @@ -302,6 +288,11 @@ nv50_fifo_destroy_context(struct nouveau DRM_DEBUG("ch%d\n", chan->id); nv50_fifo_channel_disable(dev, chan->id, 0); + + /* Dummy channel, also used on ch 127 */ + if (chan->id == 0) + nv50_fifo_channel_disable(dev, 127, 0); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); } diff --git a/shared-core/nv50_instmem.c b/shared-core/nv50_instmem.c index c26b1db..1eeb54d 100644 --- a/shared-core/nv50_instmem.c +++ b/shared-core/nv50_instmem.c @@ -31,118 +31,162 @@ typedef struct { uint32_t save1700[5]; /* 0x1700->0x1710 */ + + struct nouveau_gpuobj_ref *pramin_pt; + struct nouveau_gpuobj_ref *pramin_bar; } nv50_instmem_priv; #define NV50_INSTMEM_PAGE_SHIFT 12 #define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) -#define NV50_INSTMEM_RSVD_SIZE (64 * 1024) #define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) +/*NOTE: - Assumes 0x1700 already covers the correct MiB 
of PRAMIN + */ +#define BAR0_WI32(g,o,v) do { \ + uint32_t offset; \ + if ((g)->im_backing) { \ + offset = (g)->im_backing->start; \ + } else { \ + offset = chan->ramin->gpuobj->im_backing->start; \ + offset += (g)->im_pramin->start; \ + } \ + offset += (o); \ + NV_WRITE(NV_RAMIN + (offset & 0xfffff), (v)); \ +} while(0) + int nv50_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; nv50_instmem_priv *priv; - uint32_t rv, pt, pts, cb, cb0, cb1, unk, as; - uint32_t i, v; - int ret; + int ret, i; + uint32_t v; priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); if (!priv) return -ENOMEM; dev_priv->Engine.instmem.priv = priv; - /* Save current state */ - for (i = 0x1700; i <= 0x1710; i+=4) - priv->save1700[(i-0x1700)/4] = NV_READ(i); + /* Reserve the last MiB of VRAM, we should probably try to avoid + * setting up the below tables over the top of the VBIOS image at + * some point. + */ + dev_priv->ramin_rsvd_vram = 1 << 20; + c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; + c_size = 128 << 10; + c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; + c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; + c_base = c_vmpd + 0x4000; + pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin->size); + + DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", c_offset); + DRM_DEBUG(" VBIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8); + DRM_DEBUG(" Aperture size: %d MiB\n", + (uint32_t)dev_priv->ramin->size >> 20); + DRM_DEBUG(" PT size: %d KiB\n", pt_size >> 10); + + NV_WRITE(NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16)); + + /* Create a fake channel, and use it as our "dummy" channels 0/127. + * The main reason for creating a channel is so we can use the gpuobj + * code. However, it's probably worth noting that NVIDIA also setup + * their channels 0/127 with the same values they configure here. 
+ * So, there may be some other reason for doing this. + * + * Have to create the entire channel manually, as the real channel + * creation code assumes we have PRAMIN access, and we don't until + * we're done here. + */ + chan = drm_calloc(1, sizeof(*chan), DRM_MEM_DRIVER); + if (!chan) + return -ENOMEM; + chan->id = 0; + chan->dev = dev; + chan->file_priv = (struct drm_file *)-2; + dev_priv->fifos[0] = dev_priv->fifos[127] = chan; + + /* Channel's PRAMIN object + heap */ + if ((ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, 128<<10, 0, + NULL, &chan->ramin))) + return ret; - as = dev_priv->ramin->size; - rv = nouveau_mem_fb_amount(dev) - (1*1024*1024); - pt = rv + 0xd0000; - pts = NV50_INSTMEM_PT_SIZE(as); - cb = rv + 0xc8000; - if ((dev_priv->chipset & 0xf0) != 0x50) { - unk = cb + 0x4200; - cb0 = cb + 0x4240; - cb1 = cb + 0x278; - } else { - unk = cb + 0x5400; - cb0 = cb + 0x5440; - cb1 = cb + 0x1478; - } - - DRM_DEBUG("PRAMIN config:\n"); - DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", rv); - DRM_DEBUG(" Aperture size: %i MiB\n", as >> 20); - DRM_DEBUG(" PT base: 0x%08x\n", pt); - DRM_DEBUG(" PT size: %d KiB\n", pts >> 10); - DRM_DEBUG(" BIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8); - DRM_DEBUG(" Config base: 0x%08x\n", cb); - DRM_DEBUG(" ctxdma Config0: 0x%08x\n", cb0); - DRM_DEBUG(" Config1: 0x%08x\n", cb1); - - /* Map first MiB of reserved vram into BAR0 PRAMIN aperture */ - NV_WRITE(0x1700, (rv>>16)); - /* Poke some regs.. */ - NV_WRITE(0x1704, (cb>>12)); - NV_WRITE(0x1710, (((unk-cb)>>4))|(1<<31)); - NV_WRITE(0x1704, (cb>>12)|(1<<30)); + if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base)) + return -ENOMEM; - /* CB0, some DMA object, NFI what it points at... Needed however, - * or the PRAMIN aperture doesn't operate as expected. 
- */ - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x00, 0x7fc00000); - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x04, 0xe1ffffff); - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x08, 0xe0000000); - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x0c, 0x01000001); - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x10, 0x00000000); - NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x14, 0x00000000); - - /* CB1, points at PRAMIN PT */ - NV_WRITE(NV_RAMIN + (cb1 - rv) + 0, pt | 0x63); - NV_WRITE(NV_RAMIN + (cb1 - rv) + 4, 0x00000000); - - /* Zero PRAMIN page table */ - v = NV_RAMIN + (pt - rv); - for (i = v; i < v + pts; i += 8) { - NV_WRITE(i + 0x00, 0x00000009); - NV_WRITE(i + 0x04, 0x00000000); - } - - /* Map page table into PRAMIN aperture */ - for (i = pt; i < pt + pts; i += 0x1000) { - uint32_t pte = NV_RAMIN + (pt-rv) + (((i-pt) >> 12) << 3); - DRM_DEBUG("PRAMIN PTE = 0x%08x @ 0x%08x\n", i, pte); - NV_WRITE(pte + 0x00, i | 1); - NV_WRITE(pte + 0x04, 0x00000000); + /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ + if ((ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc, + 0x4000, 0, NULL, &chan->ramfc))) + return ret; + + for (i = 0; i < c_vmpd; i += 4) + BAR0_WI32(chan->ramin->gpuobj, i, 0); + + /* VM page directory */ + if ((ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd, + 0x4000, 0, &chan->vm_pd, NULL))) + return ret; + for (i = 0; i < 0x4000; i += 8) { + BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000); + BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000); } - /* Points at CB0 */ - NV_WRITE(0x170c, (((cb0 - cb)>>4)|(1<<31))); - - /* Confirm it all worked, should be able to read back the page table's - * PTEs from the PRAMIN BAR + /* PRAMIN page table, cheat and map into VM at 0x0000000000. 
+ * We map the entire fake channel into the start of the PRAMIN BAR */ - NV_WRITE(0x1700, pt >> 16); - if (NV_READ(0x700000) != NV_RI32(0)) { - DRM_ERROR("Failed to init PRAMIN page table\n"); - return -EINVAL; + if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, + 0, &priv->pramin_pt))) + return ret; + + for (i = 0, v = c_offset; i < pt_size; i+=8, v+=0x1000) { + if (v < (c_offset + c_size)) + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); + else + BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); + BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); } - /* Create a heap to manage PRAMIN aperture allocations */ - ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, pts, as-pts); - if (ret) { - DRM_ERROR("Failed to init PRAMIN heap\n"); - return -ENOMEM; + BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); + BAR0_WI32(chan->vm_pd, 0x04, 0x00000000); + + /* DMA object for PRAMIN BAR */ + if ((ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0, + &priv->pramin_bar))) + return ret; + BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin->size - 1); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000); + BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000); + + /* Poke the relevant regs, and pray it works :) */ + NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12)); + NV_WRITE(NV50_PUNK_UNK1710, 0); + NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) | + NV50_PUNK_BAR_CFG_BASE_VALID); + NV_WRITE(NV50_PUNK_BAR1_CTXDMA, 0); + NV_WRITE(NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) | + NV50_PUNK_BAR3_CTXDMA_VALID); + + /* Assume that praying isn't enough, check that we can re-read the + * entire fake channel back from the PRAMIN BAR */ + for (i = 0; i < c_size; i+=4) { + if (NV_READ(NV_RAMIN + i) != NV_RI32(i)) { 
+ DRM_ERROR("Error reading back PRAMIN at 0x%08x\n", i); + return -EINVAL; + } } - DRM_DEBUG("NV50: PRAMIN setup ok\n"); - /* Don't alloc the last MiB of VRAM, probably too much, but be safe - * at least for now. - */ - dev_priv->ramin_rsvd_vram = 1*1024*1024; + /* Global PRAMIN heap */ + if (nouveau_mem_init_heap(&dev_priv->ramin_heap, + c_size, dev_priv->ramin->size - c_size)) { + dev_priv->ramin_heap = NULL; + DRM_ERROR("Failed to init RAMIN heap\n"); + } - /*XXX: probably incorrect, but needed to make hash func "work" */ + /*XXX: incorrect, but needed to make hash func "work" */ dev_priv->ramht_offset = 0x10000; dev_priv->ramht_bits = 9; dev_priv->ramht_size = (1 << dev_priv->ramht_bits); @@ -154,8 +198,11 @@ nv50_instmem_takedown(struct drm_device { struct drm_nouveau_private *dev_priv = dev->dev_private; nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; + struct nouveau_channel *chan = dev_priv->fifos[0]; int i; + DRM_DEBUG("\n"); + if (!priv) return; @@ -163,6 +210,20 @@ nv50_instmem_takedown(struct drm_device for (i = 0x1700; i <= 0x1710; i+=4) NV_WRITE(i, priv->save1700[(i-0x1700)/4]); + nouveau_gpuobj_ref_del(dev, &priv->pramin_bar); + nouveau_gpuobj_ref_del(dev, &priv->pramin_pt); + + /* Destroy dummy channel */ + if (chan) { + nouveau_gpuobj_del(dev, &chan->vm_pd); + nouveau_gpuobj_ref_del(dev, &chan->ramfc); + nouveau_gpuobj_ref_del(dev, &chan->ramin); + nouveau_mem_takedown(&chan->ramin_heap); + + dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; + drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); + } + dev_priv->Engine.instmem.priv = NULL; drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); } @@ -205,6 +266,7 @@ int nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; + nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; uint32_t pte, pte_end, vram; if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) @@ -217,19 +279,14 @@ nv50_instmem_bind(struct 
drm_device *dev pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; vram = gpuobj->im_backing->start; - if (pte == pte_end) { - DRM_ERROR("WARNING: badness in bind() pte calc\n"); - pte_end++; - } - DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n", gpuobj->im_pramin->start, pte, pte_end); DRM_DEBUG("first vram page: 0x%llx\n", gpuobj->im_backing->start); while (pte < pte_end) { - NV_WI32(pte + 0, vram | 1); - NV_WI32(pte + 4, 0x00000000); + INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); + INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); pte += 8; vram += NV50_INSTMEM_PAGE_SIZE; @@ -243,6 +300,7 @@ int nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) { struct drm_nouveau_private *dev_priv = dev->dev_private; + nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; uint32_t pte, pte_end; if (gpuobj->im_bound == 0) @@ -251,8 +309,8 @@ nv50_instmem_unbind(struct drm_device *d pte = (gpuobj->im_pramin->start >> 12) << 3; pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; while (pte < pte_end) { - NV_WI32(pte + 0, 0x00000000); - NV_WI32(pte + 4, 0x00000000); + INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); + INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); pte += 8; } diff-tree 39907f613b6c84499c34c9a6ece5f5dde64788c0 (from 7784e8c6e74b93ffb39d82e3385bd3268a55507c) Author: Ben Skeggs <sk...@gm...> Date: Fri Aug 10 13:53:10 2007 +1000 nouveau: Allow creation of gpuobjs before any other init has taken place. 
diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 572df46..4d5c7f7 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -370,8 +370,10 @@ extern int nouveau_fifo_alloc(struct dr extern void nouveau_fifo_free(struct nouveau_channel *); /* nouveau_object.c */ +extern int nouveau_gpuobj_early_init(struct drm_device *); extern int nouveau_gpuobj_init(struct drm_device *); extern void nouveau_gpuobj_takedown(struct drm_device *); +extern void nouveau_gpuobj_late_takedown(struct drm_device *); extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, uint32_t vram_h, uint32_t tt_h); extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index bb09653..d4142e4 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -265,11 +265,25 @@ nouveau_gpuobj_new(struct drm_device *de } int +nouveau_gpuobj_early_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + DRM_DEBUG("\n"); + + INIT_LIST_HEAD(&dev_priv->gpuobj_list); + + return 0; +} + +int nouveau_gpuobj_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; + DRM_DEBUG("\n"); + if (dev_priv->card_type < NV_50) { if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, dev_priv->ramht_size, @@ -286,12 +300,20 @@ void nouveau_gpuobj_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj = NULL; - struct list_head *entry, *tmp; DRM_DEBUG("\n"); nouveau_gpuobj_del(dev, &dev_priv->ramht); +} + +void +nouveau_gpuobj_late_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; + struct list_head *entry, *tmp; + + DRM_DEBUG("\n"); list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) { gpuobj = list_entry(entry, struct nouveau_gpuobj, 
list); diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index e80e77a..eac3806 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -296,7 +296,8 @@ nouveau_card_init(struct drm_device *dev engine = &dev_priv->Engine; dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; - INIT_LIST_HEAD(&dev_priv->gpuobj_list); + ret = nouveau_gpuobj_early_init(dev); + if (ret) return ret; /* Initialise instance memory, must happen before mem_init so we * know exactly how much VRAM we're able to use for "normal" @@ -375,6 +376,8 @@ static void nouveau_card_takedown(struct drm_irq_uninstall(dev); + nouveau_gpuobj_late_takedown(dev); + dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; } } diff-tree 7784e8c6e74b93ffb39d82e3385bd3268a55507c (from 7281463f8d5d45a26f4cdff3fb67d896e0e74f74) Author: Ben Skeggs <sk...@gm...> Date: Thu Aug 9 11:12:13 2007 +1000 nouveau: silence irq handler a bit diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index 03c466d..d8a2c1b 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -72,12 +72,10 @@ static void nouveau_fifo_irq_handler(str chstat = NV_READ(NV04_PFIFO_DMA); channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); - DRM_DEBUG("NV: PFIFO interrupt! 
Channel=%d, INTSTAT=0x%08x/MODE=0x%08x/PEND=0x%08x\n", channel, status, chmode, chstat); - if (status & NV_PFIFO_INTR_CACHE_ERROR) { uint32_t c1get, c1method, c1data; - DRM_ERROR("NV: PFIFO error interrupt\n"); + DRM_ERROR("PFIFO error interrupt\n"); c1get = NV_READ(NV03_PFIFO_CACHE1_GET) >> 2; if (dev_priv->card_type < NV_40) { @@ -89,17 +87,17 @@ static void nouveau_fifo_irq_handler(str c1data = NV_READ(NV40_PFIFO_CACHE1_DATA(c1get)); } - DRM_ERROR("NV: Channel %d/%d - Method 0x%04x, Data 0x%08x\n", - channel, (c1method >> 13) & 7, - c1method & 0x1ffc, c1data - ); + DRM_ERROR("Channel %d/%d - Method 0x%04x, Data 0x%08x\n", + channel, (c1method >> 13) & 7, c1method & 0x1ffc, + c1data); status &= ~NV_PFIFO_INTR_CACHE_ERROR; NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); } if (status & NV_PFIFO_INTR_DMA_PUSHER) { - DRM_INFO("NV: PFIFO DMA pusher interrupt\n"); + DRM_ERROR("PFIFO DMA pusher interrupt: ch%d, 0x%08x\n", + channel, NV_READ(NV04_PFIFO_CACHE1_DMA_GET)); status &= ~NV_PFIFO_INTR_DMA_PUSHER; NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER); @@ -113,7 +111,7 @@ static void nouveau_fifo_irq_handler(str } if (status) { - DRM_INFO("NV: unknown PFIFO interrupt. 
status=0x%08x\n", status); + DRM_ERROR("Unhandled PFIFO interrupt: status=0x%08x\n", status); NV_WRITE(NV03_PFIFO_INTR_0, status); } @@ -311,77 +309,31 @@ nouveau_graph_dump_trap_info(struct drm_ ARRAY_SIZE(nouveau_nstatus_names)); printk("\n"); - DRM_ERROR("NV: Channel %d/%d (class 0x%04x) - " - "Method 0x%04x, Data 0x%08x\n", - channel, subc, class, method, data - ); + DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x\n", + channel, subc, class, method, data); } static void nouveau_pgraph_irq_handler(struct drm_device *dev) { - uint32_t status; struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t status, nsource; status = NV_READ(NV03_PGRAPH_INTR); if (!status) return; + nsource = NV_READ(NV03_PGRAPH_NSOURCE); if (status & NV_PGRAPH_INTR_NOTIFY) { - uint32_t nsource, nstatus, instance, notify; - DRM_DEBUG("NV: PGRAPH notify interrupt\n"); + DRM_DEBUG("PGRAPH notify interrupt\n"); - nstatus = NV_READ(NV03_PGRAPH_NSTATUS); - nsource = NV_READ(NV03_PGRAPH_NSOURCE); - DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); - - /* if this wasn't NOTIFICATION_PENDING, dump extra trap info */ - if (nsource & ~(1<<0)) { - nouveau_graph_dump_trap_info(dev); - } else { - instance = NV_READ(0x00400158); - notify = NV_READ(0x00400150) >> 16; - DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", - instance, notify); - } + nouveau_graph_dump_trap_info(dev); status &= ~NV_PGRAPH_INTR_NOTIFY; NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); } - if (status & NV_PGRAPH_INTR_BUFFER_NOTIFY) { - uint32_t nsource, nstatus, instance, notify; - DRM_DEBUG("NV: PGRAPH buffer notify interrupt\n"); - - nstatus = NV_READ(NV03_PGRAPH_NSTATUS); - nsource = NV_READ(NV03_PGRAPH_NSOURCE); - DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); - - instance = NV_READ(0x00400158); - notify = NV_READ(0x00400150) >> 16; - DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", instance, notify); - - status &= ~NV_PGRAPH_INTR_BUFFER_NOTIFY; - 
NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_BUFFER_NOTIFY); - } - - if (status & NV_PGRAPH_INTR_MISSING_HW) { - DRM_ERROR("NV: PGRAPH missing hw interrupt\n"); - - status &= ~NV_PGRAPH_INTR_MISSING_HW; - NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_MISSING_HW); - } - if (status & NV_PGRAPH_INTR_ERROR) { - uint32_t nsource, nstatus, instance; - - DRM_ERROR("NV: PGRAPH error interrupt\n"); - - nstatus = NV_READ(NV03_PGRAPH_NSTATUS); - nsource = NV_READ(NV03_PGRAPH_NSOURCE); - DRM_ERROR("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); - - instance = NV_READ(0x00400158); - DRM_ERROR("instance:0x%08x\n", instance); + DRM_ERROR("PGRAPH error interrupt\n"); nouveau_graph_dump_trap_info(dev); @@ -391,7 +343,7 @@ static void nouveau_pgraph_irq_handler(s if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { uint32_t channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); - DRM_INFO("NV: PGRAPH context switch interrupt channel %x\n",channel); + DRM_DEBUG("PGRAPH context switch interrupt channel %x\n",channel); switch(dev_priv->card_type) { case NV_04: @@ -408,7 +360,7 @@ static void nouveau_pgraph_irq_handler(s nouveau_nv20_context_switch(dev); break; default: - DRM_INFO("NV: Context switch not implemented\n"); + DRM_ERROR("Context switch not implemented\n"); break; } @@ -417,7 +369,7 @@ static void nouveau_pgraph_irq_handler(s } if (status) { - DRM_INFO("NV: Unknown PGRAPH interrupt! 
STAT=0x%08x\n", status); + DRM_ERROR("Unhandled PGRAPH interrupt: STAT=0x%08x\n", status); NV_WRITE(NV03_PGRAPH_INTR, status); } @@ -427,6 +379,7 @@ static void nouveau_pgraph_irq_handler(s static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) { struct drm_nouveau_private *dev_priv = dev->dev_private; + if (crtc&1) { NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); } @@ -446,16 +399,16 @@ irqreturn_t nouveau_irq_handler(DRM_IRQ_ if (!status) return IRQ_NONE; - DRM_DEBUG("PMC INTSTAT: 0x%08x\n", status); - if (status & NV_PMC_INTR_0_PFIFO_PENDING) { nouveau_fifo_irq_handler(dev); status &= ~NV_PMC_INTR_0_PFIFO_PENDING; } + if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { nouveau_pgraph_irq_handler(dev); status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; } + if (status & NV_PMC_INTR_0_CRTCn_PENDING) { nouveau_crtc_irq_handler(dev, (status>>24)&3); status &= ~NV_PMC_INTR_0_CRTCn_PENDING; |
From: <dar...@ke...> - 2007-08-22 06:59:43
|
shared-core/nouveau_reg.h | 4 +++ shared-core/nv40_graph.c | 54 ++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 52 insertions(+), 6 deletions(-) New commits: diff-tree 11c46afe7599cf3cefd30a7e55325a1a1aa8e5ba (from a654c0341a7892307522ed6e7f4518cc7e28a99e) Author: Ben Skeggs <sk...@gm...> Date: Wed Aug 22 13:23:49 2007 +1000 nouveau/nv40: Preserve other bits in 0x400304/0x400310 like NVIDIA do. diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 25ee5c7..26237c7 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1293,20 +1293,26 @@ static int nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t old_cp, tv = 1000; + uint32_t old_cp, tv = 1000, tmp; int i; old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); - NV_WRITE(NV40_PGRAPH_CTXCTL_0310, - save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : - NV40_PGRAPH_CTXCTL_0310_XFER_LOAD); - NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX); + + tmp = NV_READ(NV40_PGRAPH_CTXCTL_0310); + tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : + NV40_PGRAPH_CTXCTL_0310_XFER_LOAD; + NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp); + + tmp = NV_READ(NV40_PGRAPH_CTXCTL_0304); + tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX; + NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp); for (i = 0; i < tv; i++) { if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) break; } + NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); if (i == tv) { diff-tree a654c0341a7892307522ed6e7f4518cc7e28a99e (from 81eaff44c47cfb23e96b1cb848df5fd7ea24f913) Author: Ben Skeggs <sk...@gm...> Date: Wed Aug 22 13:17:19 2007 +1000 nouveau/nv40: Dump extra info on ucode state if ctx switch fails. 
diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 6561462..1023e75 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -178,6 +178,10 @@ #define NV10_PGRAPH_CTX_CACHE5 0x004001E0 #define NV40_PGRAPH_CTXCTL_0304 0x00400304 #define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24 +#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff #define NV40_PGRAPH_CTXCTL_0310 0x00400310 #define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 #define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 8882e62..25ee5c7 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1310,7 +1310,11 @@ nv40_graph_transfer_context(struct drm_d NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); if (i == tv) { - DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); + uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT); + DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save); + DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n", + ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT, + ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK); DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(NV40_PGRAPH_CTXCTL_030C)); return -EBUSY; diff-tree 81eaff44c47cfb23e96b1cb848df5fd7ea24f913 (from ae883c97ad7af5529d40c8d52c2da614d34233e0) Author: Ben Skeggs <sk...@gm...> Date: Wed Aug 22 13:09:27 2007 +1000 nouveau: NV4c ctx ucode. Seems we already have a nv4c_ctx_init() somehow, a quick check shows the ucode matches it still. 
diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 0e6028a..8882e62 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1584,6 +1584,37 @@ static uint32_t nv4a_ctx_voodoo[] = { 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 }; +static uint32_t nv4c_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406, + 0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, + 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968, + 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, + 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, + 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, + 0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6, + 0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, + 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, + 0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, + 0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a, + 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, + 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00, + 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, + 0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, + 0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a, + 0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080, + 0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004, + 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 
0x00106002, 0x0040a168, + 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, + 0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000, + 0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, + 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306, + 0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 +}; + static uint32_t nv4e_ctx_voodoo[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, @@ -1648,6 +1679,7 @@ nv40_graph_init(struct drm_device *dev) case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break; case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break; case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break; + case 0x4c: ctx_voodoo = nv4c_ctx_voodoo; break; case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break; default: DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n", |
From: <pq...@ke...> - 2007-10-02 19:19:56
|
shared-core/nv20_graph.c | 147 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 142 insertions(+), 5 deletions(-) New commits: diff-tree a72eb27fbc7a66e35018ffbcb5137cfaaf4049aa (from afc57ef1dfb5bdf17411505d4dfbb03863a870bf) Author: Pekka Paalanen <pq...@ik...> Date: Tue Oct 2 21:56:01 2007 +0300 nouveau: nv20 graph_create_context difference nv20 writes the chan->id to a different place than nv28. This still does not make nv20 run nv10_demo. diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 6b4c25e..aba5a7e 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -3004,12 +3004,14 @@ int nv20_graph_create_context(struct nou struct drm_nouveau_private *dev_priv = dev->dev_private; void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); unsigned int ctx_size; + unsigned int idoffs = 0x28/4; int ret; switch (dev_priv->chipset) { case 0x20: ctx_size = NV20_GRCTX_SIZE; ctx_init = nv20_graph_context_init; + idoffs = 0; break; case 0x25: case 0x28: @@ -3048,7 +3050,7 @@ int nv20_graph_create_context(struct nou ctx_init(dev, chan->ramin_grctx->gpuobj); /* nv20: INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ - INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x28/4, (chan->id<<24)|0x1); + INSTANCE_WR(chan->ramin_grctx->gpuobj, idoffs, (chan->id<<24)|0x1); /* CTX_USER */ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, diff-tree afc57ef1dfb5bdf17411505d4dfbb03863a870bf (from ffa3173ec4bb5a310b3f8539bb6c2f8589ce2ed5) Author: Pekka Paalanen <pq...@ik...> Date: Tue Oct 2 21:51:14 2007 +0300 nouveau: fix nv25_graph_context_init It was writing 4x the data in a loop. 
diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 213d60c..6b4c25e 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -256,11 +256,10 @@ write32 #1 block at +0x00740d34 NV_PRAMI INSTANCE_WR(ctx, (0x614/4)+25, 0x00000001); INSTANCE_WR(ctx, (0x614/4)+27, 0x00040000); INSTANCE_WR(ctx, (0x614/4)+28, 0x00010000); - for (i=0; i<0x880; i+=4) { + for (i=0; i < 0x880/4; i+=4) { INSTANCE_WR(ctx, (0x1b04/4)+i+0, 0x10700ff9); INSTANCE_WR(ctx, (0x1b04/4)+i+1, 0x0436086c); INSTANCE_WR(ctx, (0x1b04/4)+i+2, 0x000c001b); - INSTANCE_WR(ctx, (0x1b04/4)+i+3, 0x00000000); } /* diff-tree ffa3173ec4bb5a310b3f8539bb6c2f8589ce2ed5 (from 69fcfb413e72ad2204d306f20af6547819e040da) Author: Stuart Bennett <sb...@ca...> Date: Tue Oct 2 15:45:30 2007 +0100 nouveau: nv20 graph context init diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 8291f21..213d60c 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -19,14 +19,146 @@ * */ -/*#define NV20_GRCTX_SIZE (3529*4)*/ - +#define NV20_GRCTX_SIZE (3580*4) #define NV25_GRCTX_SIZE (3529*4) #define NV30_31_GRCTX_SIZE (22392) #define NV34_GRCTX_SIZE (18140) #define NV35_36_GRCTX_SIZE (22396) +static void nv20_graph_context_init(struct drm_device *dev, + struct nouveau_gpuobj *ctx) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int i; +/* +write32 #1 block at +0x00740adc NV_PRAMIN+0x40adc of 3369 (0xd29) elements: ++0x00740adc: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b3c: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000 ++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740b9c: 00000000 00000000 
00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740bbc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740bdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740bfc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 + ++0x00740c1c: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000 ++0x00740c3c: 00000000 00000000 00000000 44400000 00000000 00000000 00000000 00000000 ++0x00740c5c: 00000000 00000000 00000000 00000000 00000000 00000000 00030303 00030303 ++0x00740c7c: 00030303 00030303 00000000 00000000 00000000 00000000 00080000 00080000 ++0x00740c9c: 00080000 00080000 00000000 00000000 01012000 01012000 01012000 01012000 ++0x00740cbc: 000105b8 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 ++0x00740cdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740cfc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 ++0x00740d1c: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 ++0x00740d3c: 00000000 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 + ++0x00740d5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740d7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740d9c: 00000001 00000000 00004000 00000000 00000000 00000001 00000000 00040000 ++0x00740dbc: 00010000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x00740ddc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +... 
+*/ + INSTANCE_WR(ctx, (0x33c/4)+0, 0xffff0000); + INSTANCE_WR(ctx, (0x33c/4)+25, 0x0fff0000); + INSTANCE_WR(ctx, (0x33c/4)+26, 0x0fff0000); + INSTANCE_WR(ctx, (0x33c/4)+80, 0x00000101); + INSTANCE_WR(ctx, (0x33c/4)+85, 0x00000111); + INSTANCE_WR(ctx, (0x33c/4)+91, 0x44400000); + for (i = 0; i < 4; ++i) + INSTANCE_WR(ctx, (0x33c/4)+102+i, 0x00030303); + for (i = 0; i < 4; ++i) + INSTANCE_WR(ctx, (0x33c/4)+110+i, 0x00080000); + for (i = 0; i < 4; ++i) + INSTANCE_WR(ctx, (0x33c/4)+116+i, 0x01012000); + for (i = 0; i < 4; ++i) + INSTANCE_WR(ctx, (0x33c/4)+120+i, 0x000105b8); + for (i = 0; i < 4; ++i) + INSTANCE_WR(ctx, (0x33c/4)+124+i, 0x00080008); + for (i = 0; i < 16; ++i) + INSTANCE_WR(ctx, (0x33c/4)+136+i, 0x07ff0000); + INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7ffff); + INSTANCE_WR(ctx, (0x33c/4)+176, 0x00000001); + INSTANCE_WR(ctx, (0x33c/4)+178, 0x00004000); + INSTANCE_WR(ctx, (0x33c/4)+181, 0x00000001); + INSTANCE_WR(ctx, (0x33c/4)+183, 0x00004000); + INSTANCE_WR(ctx, (0x33c/4)+184, 0x00010000); + +/* +... ++0x0074239c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 ++0x007423bc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x007423dc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x007423fc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 +... ++0x00742bdc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742bfc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742c1c: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000 ++0x00742c3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 +... 
+*/ + for (i = 0; i < 0x880; i += 0x10) { + INSTANCE_WR(ctx, ((0x1c1c + i)/4)+0, 0x10700ff9); + INSTANCE_WR(ctx, ((0x1c1c + i)/4)+1, 0x0436086c); + INSTANCE_WR(ctx, ((0x1c1c + i)/4)+2, 0x000c001b); + } + +/* +write32 #1 block at +0x00742fbc NV_PRAMIN+0x42fbc of 4 (0x4) elements: ++0x00742fbc: 3f800000 00000000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x281c/4), 0x3f800000); + +/* +write32 #1 block at +0x00742ffc NV_PRAMIN+0x42ffc of 12 (0xc) elements: ++0x00742ffc: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000 ++0x0074301c: 00000000 bf800000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x285c/4)+0, 0x40000000); + INSTANCE_WR(ctx, (0x285c/4)+1, 0x3f800000); + INSTANCE_WR(ctx, (0x285c/4)+2, 0x3f000000); + INSTANCE_WR(ctx, (0x285c/4)+4, 0x40000000); + INSTANCE_WR(ctx, (0x285c/4)+5, 0x3f800000); + INSTANCE_WR(ctx, (0x285c/4)+7, 0xbf800000); + INSTANCE_WR(ctx, (0x285c/4)+9, 0xbf800000); + +/* +write32 #1 block at +0x00742fcc NV_PRAMIN+0x42fcc of 4 (0x4) elements: ++0x00742fcc: 00000000 3f800000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x282c/4)+1, 0x3f800000); + +/* +write32 #1 block at +0x0074302c NV_PRAMIN+0x4302c of 4 (0x4) elements: ++0x0074302c: 00000000 00000000 00000000 00000000 +write32 #1 block at +0x00743c9c NV_PRAMIN+0x43c9c of 4 (0x4) elements: ++0x00743c9c: 00000000 00000000 00000000 00000000 +write32 #1 block at +0x00743c3c NV_PRAMIN+0x43c3c of 8 (0x8) elements: ++0x00743c3c: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x349c/4)+2, 0x000fe000); + +/* +write32 #1 block at +0x00743c6c NV_PRAMIN+0x43c6c of 4 (0x4) elements: ++0x00743c6c: 00000000 00000000 00000000 00000000 +write32 #1 block at +0x00743ccc NV_PRAMIN+0x43ccc of 4 (0x4) elements: ++0x00743ccc: 00000000 000003f8 00000000 00000000 +*/ + INSTANCE_WR(ctx, (0x352c/4)+1, 0x000003f8); + +/* write32 #1 NV_PRAMIN+0x43ce0 <- 0x002fe000 */ + INSTANCE_WR(ctx, 0x3540/4, 0x002fe000); + +/* +write32 #1 block at +0x00743cfc 
NV_PRAMIN+0x43cfc of 8 (0x8) elements: ++0x00743cfc: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c +*/ + for (i = 0; i < 8; ++i) + INSTANCE_WR(ctx, (0x355c/4)+i, 0x001c527c); +} static void nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) @@ -2876,6 +3008,10 @@ int nv20_graph_create_context(struct nou int ret; switch (dev_priv->chipset) { + case 0x20: + ctx_size = NV20_GRCTX_SIZE; + ctx_init = nv20_graph_context_init; + break; case 0x25: case 0x28: ctx_size = NV25_GRCTX_SIZE; |
From: <pq...@ke...> - 2007-10-13 15:30:08
|
shared-core/nouveau_notifier.c | 13 ++----------- shared-core/nv20_graph.c | 6 +++--- 2 files changed, 5 insertions(+), 14 deletions(-) New commits: diff-tree 3ab7627651f4c48a114d91158d41e4c4f528c4cc (from 50deb31e9ff556f941449bc788821eaa2e5f9e34) Author: Pekka Paalanen <pq...@ik...> Date: Fri Oct 12 23:55:59 2007 +0300 nouveau: Fix a typo in nv25_graph_context_init diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 6b2aa5a..ae0e085 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -283,7 +283,7 @@ write32 #1 block at +0x00740a7c NV_PRAMI INSTANCE_WR(ctx, (0x35c/4)+134, 0x00080008); for (i=0; i<16; ++i) INSTANCE_WR(ctx, (0x35c/4)+143+i, 0x07ff0000); - INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7ffff); + INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7fffff); /* write32 #1 block at +0x00740d34 NV_PRAMIN.GRCTX0+0x614 of 3136 (0xc40) elements: diff-tree 50deb31e9ff556f941449bc788821eaa2e5f9e34 (from 0d2554f83e72cae1bc44e476fbed4fc78873264f) Author: Stuart Bennett <sb...@ca...> Date: Tue Oct 9 20:39:10 2007 +0100 nouveau: Fix typos in nv20_graph_context_init diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index 9edab59..6b2aa5a 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -86,11 +86,11 @@ write32 #1 block at +0x00740adc NV_PRAMI INSTANCE_WR(ctx, (0x33c/4)+124+i, 0x00080008); for (i = 0; i < 16; ++i) INSTANCE_WR(ctx, (0x33c/4)+136+i, 0x07ff0000); - INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7ffff); + INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7fffff); INSTANCE_WR(ctx, (0x33c/4)+176, 0x00000001); INSTANCE_WR(ctx, (0x33c/4)+178, 0x00004000); INSTANCE_WR(ctx, (0x33c/4)+181, 0x00000001); - INSTANCE_WR(ctx, (0x33c/4)+183, 0x00004000); + INSTANCE_WR(ctx, (0x33c/4)+183, 0x00040000); INSTANCE_WR(ctx, (0x33c/4)+184, 0x00010000); /* diff-tree 0d2554f83e72cae1bc44e476fbed4fc78873264f (from 9d779e2c88a02f5f9d57618145654610f0f10e28) Author: Pekka Paalanen <pq...@ik...> Date: Fri Oct 12 23:43:31 2007 +0300 nouveau: Make notifiers 
go into PCI memory On some hardware notifiers in AGP memory just don't work. diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index d3b7968..c361bc6 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -36,17 +36,8 @@ nouveau_notifier_init_channel(struct nou struct drm_nouveau_private *dev_priv = dev->dev_private; int flags, ret; - /*TODO: PCI notifier blocks */ -#ifndef __powerpc__ - if (dev_priv->agp_heap) - flags = NOUVEAU_MEM_AGP; - else -#endif - if (dev_priv->pci_heap) - flags = NOUVEAU_MEM_PCI; - else - flags = NOUVEAU_MEM_FB; - flags |= (NOUVEAU_MEM_MAPPED | NOUVEAU_MEM_FB_ACCEPTABLE); + flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED | + NOUVEAU_MEM_FB_ACCEPTABLE); chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, (struct drm_file *)-2); |
From: <ai...@ke...> - 2007-11-05 09:09:45
|
linux-core/drm_bo.c | 218 ++++++++++++++++++++++------------------------- linux-core/drm_bo_lock.c | 9 - linux-core/drm_bo_move.c | 59 +++++------- linux-core/drm_dma.c | 2 linux-core/drm_object.c | 36 +++---- linux-core/drm_objects.h | 195 +++++++++++++++++++++--------------------- linux-core/drm_ttm.c | 63 ++++++------- 7 files changed, 279 insertions(+), 303 deletions(-) New commits: commit 6ee5412da0cc7516472235805482b8632cb374ef Author: Dave Airlie <ai...@li...> Date: Mon Nov 5 19:09:18 2007 +1000 drm/ttm: apply linux kernel coding style to bo_lock/move/object/ttm.c diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c index 46318f6..f967fb7 100644 --- a/linux-core/drm_bo_lock.c +++ b/linux-core/drm_bo_lock.c @@ -73,7 +73,6 @@ void drm_bo_read_unlock(struct drm_bo_lock *lock) if (atomic_read(&lock->readers) == 0) wake_up_interruptible(&lock->queue); } - EXPORT_SYMBOL(drm_bo_read_unlock); int drm_bo_read_lock(struct drm_bo_lock *lock) @@ -95,7 +94,6 @@ int drm_bo_read_lock(struct drm_bo_lock *lock) } return 0; } - EXPORT_SYMBOL(drm_bo_read_lock); static int __drm_bo_write_unlock(struct drm_bo_lock *lock) @@ -123,9 +121,8 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) int ret = 0; struct drm_device *dev; - if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) { + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) return -EINVAL; - } while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { ret = wait_event_interruptible @@ -149,9 +146,9 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) ret = drm_add_user_object(file_priv, &lock->base, 0); lock->base.remove = &drm_bo_write_lock_remove; lock->base.type = drm_lock_type; - if (ret) { + if (ret) (void)__drm_bo_write_unlock(lock); - } + mutex_unlock(&dev->struct_mutex); return ret; diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 9ab28b0..2aba327 100644 --- a/linux-core/drm_bo_move.c +++ 
b/linux-core/drm_bo_move.c @@ -35,7 +35,7 @@ * have not been requested to free also pinned regions. */ -static void drm_bo_free_old_node(struct drm_buffer_object * bo) +static void drm_bo_free_old_node(struct drm_buffer_object *bo) { struct drm_bo_mem_reg *old_mem = &bo->mem; @@ -48,8 +48,8 @@ static void drm_bo_free_old_node(struct drm_buffer_object * bo) old_mem->mm_node = NULL; } -int drm_bo_move_ttm(struct drm_buffer_object * bo, - int evict, int no_wait, struct drm_bo_mem_reg * new_mem) +int drm_bo_move_ttm(struct drm_buffer_object *bo, + int evict, int no_wait, struct drm_bo_mem_reg *new_mem) { struct drm_ttm *ttm = bo->ttm; struct drm_bo_mem_reg *old_mem = &bo->mem; @@ -82,7 +82,6 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo, DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } - EXPORT_SYMBOL(drm_bo_move_ttm); /** @@ -100,7 +99,7 @@ EXPORT_SYMBOL(drm_bo_move_ttm); * Call bo->mutex locked. */ -int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem, +int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem, void **virtual) { struct drm_buffer_manager *bm = &dev->bm; @@ -136,7 +135,7 @@ EXPORT_SYMBOL(drm_mem_reg_ioremap); * Call bo->mutex locked. 
*/ -void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem, +void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem, void *virtual) { struct drm_buffer_manager *bm; @@ -145,9 +144,8 @@ void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem, bm = &dev->bm; man = &bm->man[mem->mem_type]; - if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { + if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) iounmap(virtual); - } } static int drm_copy_io_page(void *dst, void *src, unsigned long page) @@ -163,7 +161,8 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page) return 0; } -static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page) +static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src, + unsigned long page) { struct page *d = drm_ttm_get_page(ttm, page); void *dst; @@ -181,7 +180,7 @@ static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long p return 0; } -static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long page) +static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page) { struct page *s = drm_ttm_get_page(ttm, page); void *src; @@ -199,8 +198,8 @@ static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long p return 0; } -int drm_bo_move_memcpy(struct drm_buffer_object * bo, - int evict, int no_wait, struct drm_bo_mem_reg * new_mem) +int drm_bo_move_memcpy(struct drm_buffer_object *bo, + int evict, int no_wait, struct drm_bo_mem_reg *new_mem) { struct drm_device *dev = bo->dev; struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; @@ -251,7 +250,7 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo, goto out1; } mb(); - out2: +out2: drm_bo_free_old_node(bo); *old_mem = *new_mem; @@ -265,13 +264,12 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo, bo->ttm = NULL; } - out1: +out1: drm_mem_reg_iounmap(dev, new_mem, new_iomap); - 
out: +out: drm_mem_reg_iounmap(dev, &old_copy, old_iomap); return ret; } - EXPORT_SYMBOL(drm_bo_move_memcpy); /* @@ -280,8 +278,8 @@ EXPORT_SYMBOL(drm_bo_move_memcpy); * object. Call bo->mutex locked. */ -int drm_buffer_object_transfer(struct drm_buffer_object * bo, - struct drm_buffer_object ** new_obj) +int drm_buffer_object_transfer(struct drm_buffer_object *bo, + struct drm_buffer_object **new_obj) { struct drm_buffer_object *fbo; struct drm_device *dev = bo->dev; @@ -322,12 +320,10 @@ int drm_buffer_object_transfer(struct drm_buffer_object * bo, * We cannot restart until it has finished. */ -int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, - int evict, - int no_wait, - uint32_t fence_class, - uint32_t fence_type, - uint32_t fence_flags, struct drm_bo_mem_reg * new_mem) +int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, + int evict, int no_wait, uint32_t fence_class, + uint32_t fence_type, uint32_t fence_flags, + struct drm_bo_mem_reg *new_mem) { struct drm_device *dev = bo->dev; struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; @@ -407,7 +403,6 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } - EXPORT_SYMBOL(drm_bo_move_accel_cleanup); int drm_bo_same_page(unsigned long offset, @@ -420,13 +415,11 @@ EXPORT_SYMBOL(drm_bo_same_page); unsigned long drm_bo_offset_end(unsigned long offset, unsigned long end) { - offset = (offset + PAGE_SIZE) & PAGE_MASK; return (end < offset) ? end : offset; } EXPORT_SYMBOL(drm_bo_offset_end); - static pgprot_t drm_kernel_io_prot(uint32_t map_type) { pgprot_t tmp = PAGE_KERNEL; @@ -475,8 +468,9 @@ static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base, return (!map->virtual) ? 
-ENOMEM : 0; } -static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page, - unsigned long num_pages, struct drm_bo_kmap_obj *map) +static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, + unsigned long start_page, unsigned long num_pages, + struct drm_bo_kmap_obj *map) { struct drm_device *dev = bo->dev; struct drm_bo_mem_reg *mem = &bo->mem; @@ -503,7 +497,7 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag * Populate the part we're mapping; */ - for (i = start_page; i< start_page + num_pages; ++i) { + for (i = start_page; i < start_page + num_pages; ++i) { d = drm_ttm_get_page(ttm, i); if (!d) return -ENOMEM; @@ -530,7 +524,8 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag * and caching policy the buffer currently has. * Mapping multiple pages or buffers that live in io memory is a bit slow and * consumes vmalloc space. Be restrictive with such mappings. - * Mapping single pages usually returns the logical kernel address, (which is fast) + * Mapping single pages usually returns the logical kernel address, + * (which is fast) * BUG may use slower temporary mappings for high memory pages or * uncached / write-combined pages. 
* @@ -581,7 +576,7 @@ void drm_bo_kunmap(struct drm_bo_kmap_obj *map) if (!map->virtual) return; - switch(map->bo_kmap_type) { + switch (map->bo_kmap_type) { case bo_map_iomap: iounmap(map->virtual); break; diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index a6d6c0d..5cc1c8d 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -30,7 +30,7 @@ #include "drmP.h" -int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, +int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item, int shareable) { struct drm_device *dev = priv->head->dev; @@ -56,7 +56,7 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, } EXPORT_SYMBOL(drm_add_user_object); -struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key) +struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key) { struct drm_device *dev = priv->head->dev; struct drm_hash_item *hash; @@ -66,9 +66,9 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t DRM_ASSERT_LOCKED(&dev->struct_mutex); ret = drm_ht_find_item(&dev->object_hash, key, &hash); - if (ret) { + if (ret) return NULL; - } + item = drm_hash_entry(hash, struct drm_user_object, hash); if (priv != item->owner) { @@ -83,7 +83,7 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t } EXPORT_SYMBOL(drm_lookup_user_object); -static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item) +static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item) { struct drm_device *dev = priv->head->dev; int ret; @@ -95,7 +95,7 @@ static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object } } -static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, +static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro, enum drm_ref_type action) { int ret = 0; 
@@ -114,7 +114,7 @@ static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object return ret; } -int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, +int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object, enum drm_ref_type ref_action) { int ret = 0; @@ -167,12 +167,12 @@ int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenc list_add(&item->list, &priv->refd_objects); ret = drm_object_ref_action(priv, referenced_object, ref_action); - out: +out: return ret; } -struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, - struct drm_user_object * referenced_object, +struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, + struct drm_user_object *referenced_object, enum drm_ref_type ref_action) { struct drm_hash_item *hash; @@ -188,8 +188,8 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, } EXPORT_SYMBOL(drm_lookup_ref_object); -static void drm_remove_other_references(struct drm_file * priv, - struct drm_user_object * ro) +static void drm_remove_other_references(struct drm_file *priv, + struct drm_user_object *ro) { int i; struct drm_open_hash *ht; @@ -205,7 +205,7 @@ static void drm_remove_other_references(struct drm_file * priv, } } -void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item) +void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item) { int ret; struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; @@ -235,8 +235,8 @@ void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item) } -int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, - enum drm_object_type type, struct drm_user_object ** object) +int drm_user_object_ref(struct drm_file *priv, uint32_t user_token, + enum drm_object_type type, struct drm_user_object **object) { struct drm_device *dev = priv->head->dev; struct 
drm_user_object *uo; @@ -260,12 +260,12 @@ int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, mutex_unlock(&dev->struct_mutex); *object = uo; return 0; - out_err: +out_err: mutex_unlock(&dev->struct_mutex); return ret; } -int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, +int drm_user_object_unref(struct drm_file *priv, uint32_t user_token, enum drm_object_type type) { struct drm_device *dev = priv->head->dev; @@ -287,7 +287,7 @@ int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, drm_remove_ref_object(priv, ro); mutex_unlock(&dev->struct_mutex); return 0; - out_err: +out_err: mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 4d51f9f..8a44070 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -46,7 +46,7 @@ EXPORT_SYMBOL(drm_ttm_cache_flush); * Use kmalloc if possible. Otherwise fall back to vmalloc. */ -static void ttm_alloc_pages(struct drm_ttm * ttm) +static void ttm_alloc_pages(struct drm_ttm *ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ttm->pages = NULL; @@ -54,20 +54,19 @@ static void ttm_alloc_pages(struct drm_ttm * ttm) if (drm_alloc_memctl(size)) return; - if (size <= PAGE_SIZE) { + if (size <= PAGE_SIZE) ttm->pages = drm_calloc(1, size, DRM_MEM_TTM); - } + if (!ttm->pages) { ttm->pages = vmalloc_user(size); if (ttm->pages) ttm->page_flags |= DRM_TTM_PAGE_VMALLOC; } - if (!ttm->pages) { + if (!ttm->pages) drm_free_memctl(size); - } } -static void ttm_free_pages(struct drm_ttm * ttm) +static void ttm_free_pages(struct drm_ttm *ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); @@ -85,9 +84,9 @@ static struct page *drm_ttm_alloc_page(void) { struct page *page; - if (drm_alloc_memctl(PAGE_SIZE)) { + if (drm_alloc_memctl(PAGE_SIZE)) return NULL; - } + page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); if (!page) { drm_free_memctl(PAGE_SIZE); @@ -106,7 +105,7 @@ static struct page 
*drm_ttm_alloc_page(void) * for range of pages in a ttm. */ -static int drm_set_caching(struct drm_ttm * ttm, int noncached) +static int drm_set_caching(struct drm_ttm *ttm, int noncached) { int i; struct page **cur_page; @@ -153,7 +152,7 @@ static void drm_ttm_free_user_pages(struct drm_ttm *ttm) dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0); down_read(&mm->mmap_sem); - for (i=0; i<ttm->num_pages; ++i) { + for (i = 0; i < ttm->num_pages; ++i) { page = ttm->pages[i]; if (page == NULL) continue; @@ -186,14 +185,10 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm) #else ClearPageReserved(*cur_page); #endif - if (page_count(*cur_page) != 1) { - DRM_ERROR("Erroneous page count. " - "Leaking pages.\n"); - } - if (page_mapped(*cur_page)) { - DRM_ERROR("Erroneous map count. " - "Leaking page mappings.\n"); - } + if (page_count(*cur_page) != 1) + DRM_ERROR("Erroneous page count. Leaking pages.\n"); + if (page_mapped(*cur_page)) + DRM_ERROR("Erroneous map count. Leaking page mappings.\n"); __free_page(*cur_page); drm_free_memctl(PAGE_SIZE); --bm->cur_pages; @@ -205,7 +200,7 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm) * Free all resources associated with a ttm. 
*/ -int drm_destroy_ttm(struct drm_ttm * ttm) +int drm_destroy_ttm(struct drm_ttm *ttm) { struct drm_ttm_backend *be; @@ -234,7 +229,7 @@ int drm_destroy_ttm(struct drm_ttm * ttm) return 0; } -struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index) +struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index) { struct page *p; struct drm_buffer_manager *bm = &ttm->dev->bm; @@ -283,10 +278,9 @@ int drm_ttm_set_user(struct drm_ttm *ttm, return -ENOMEM; } - for (i=0; i<num_pages; ++i) { - if (ttm->pages[i] == NULL) { + for (i = 0; i < num_pages; ++i) { + if (ttm->pages[i] == NULL) ttm->pages[i] = ttm->dummy_read_page; - } } return 0; @@ -294,7 +288,7 @@ int drm_ttm_set_user(struct drm_ttm *ttm, -int drm_ttm_populate(struct drm_ttm * ttm) +int drm_ttm_populate(struct drm_ttm *ttm) { struct page *page; unsigned long i; @@ -318,7 +312,7 @@ int drm_ttm_populate(struct drm_ttm * ttm) * Initialize a ttm. */ -struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size) +struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size) { struct drm_bo_driver *bo_driver = dev->driver->bo_driver; struct drm_ttm *ttm; @@ -362,7 +356,7 @@ struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size) * Unbind a ttm region from the aperture. 
*/ -void drm_ttm_evict(struct drm_ttm * ttm) +void drm_ttm_evict(struct drm_ttm *ttm) { struct drm_ttm_backend *be = ttm->be; int ret; @@ -375,19 +369,18 @@ void drm_ttm_evict(struct drm_ttm * ttm) ttm->state = ttm_evicted; } -void drm_ttm_fixup_caching(struct drm_ttm * ttm) +void drm_ttm_fixup_caching(struct drm_ttm *ttm) { if (ttm->state == ttm_evicted) { struct drm_ttm_backend *be = ttm->be; - if (be->func->needs_ub_cache_adjust(be)) { + if (be->func->needs_ub_cache_adjust(be)) drm_set_caching(ttm, 0); - } ttm->state = ttm_unbound; } } -void drm_ttm_unbind(struct drm_ttm * ttm) +void drm_ttm_unbind(struct drm_ttm *ttm) { if (ttm->state == ttm_bound) drm_ttm_evict(ttm); @@ -395,7 +388,7 @@ void drm_ttm_unbind(struct drm_ttm * ttm) drm_ttm_fixup_caching(ttm); } -int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem) +int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem) { struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver; int ret = 0; @@ -412,13 +405,14 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem) if (ret) return ret; - if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) { + if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); - } else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) && + else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) && bo_driver->ttm_cache_flush) bo_driver->ttm_cache_flush(ttm); - if ((ret = be->func->bind(be, bo_mem))) { + ret = be->func->bind(be, bo_mem); + if (ret) { ttm->state = ttm_evicted; DRM_ERROR("Couldn't bind backend.\n"); return ret; @@ -429,5 +423,4 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem) ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY; return 0; } - EXPORT_SYMBOL(drm_bind_ttm); commit 7ad38907075852e347b5b4432c96d199387f5ce8 Author: Dave Airlie <ai...@li...> Date: Mon Nov 5 19:05:32 2007 +1000 drm/ttm: kernel coding style for bo.c and objects.h 
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 4cdf889..91bc0c7 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -36,23 +36,23 @@ * The buffer usage atomic_t needs to be protected by dev->struct_mutex * when there is a chance that it can be zero before or after the operation. * - * dev->struct_mutex also protects all lists and list heads. Hash tables and hash - * heads. + * dev->struct_mutex also protects all lists and list heads, + * Hash tables and hash heads. * * bo->mutex protects the buffer object itself excluding the usage field. - * bo->mutex does also protect the buffer list heads, so to manipulate those, we need - * both the bo->mutex and the dev->struct_mutex. + * bo->mutex does also protect the buffer list heads, so to manipulate those, + * we need both the bo->mutex and the dev->struct_mutex. * - * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit - * complicated. When dev->struct_mutex is released to grab bo->mutex, the list - * traversal will, in general, need to be restarted. + * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal + * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex, + * the list traversal will, in general, need to be restarted. * */ -static void drm_bo_destroy_locked(struct drm_buffer_object * bo); -static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo); -static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo); -static void drm_bo_unmap_virtual(struct drm_buffer_object * bo); +static void drm_bo_destroy_locked(struct drm_buffer_object *bo); +static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo); +static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo); +static void drm_bo_unmap_virtual(struct drm_buffer_object *bo); static inline uint64_t drm_bo_type_flags(unsigned type) { @@ -63,7 +63,7 @@ static inline uint64_t drm_bo_type_flags(unsigned type) * bo locked. dev->struct_mutex locked. 
*/ -void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo) +void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo) { struct drm_mem_type_manager *man; @@ -74,7 +74,7 @@ void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo) list_add_tail(&bo->pinned_lru, &man->pinned); } -void drm_bo_add_to_lru(struct drm_buffer_object * bo) +void drm_bo_add_to_lru(struct drm_buffer_object *bo) { struct drm_mem_type_manager *man; @@ -89,7 +89,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo) } } -static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci) +static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -112,7 +112,7 @@ static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci) return 0; } -static void drm_bo_vm_post_move(struct drm_buffer_object * bo) +static void drm_bo_vm_post_move(struct drm_buffer_object *bo) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -133,7 +133,7 @@ static void drm_bo_vm_post_move(struct drm_buffer_object * bo) * Call bo->mutex locked. */ -static int drm_bo_add_ttm(struct drm_buffer_object * bo) +static int drm_bo_add_ttm(struct drm_buffer_object *bo) { struct drm_device *dev = bo->dev; int ret = 0; @@ -171,8 +171,8 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo) return ret; } -static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, - struct drm_bo_mem_reg * mem, +static int drm_bo_handle_move_mem(struct drm_buffer_object *bo, + struct drm_bo_mem_reg *mem, int evict, int no_wait) { struct drm_device *dev = bo->dev; @@ -255,7 +255,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, return 0; - out_err: +out_err: if (old_is_pci || new_is_pci) drm_bo_vm_post_move(bo); @@ -274,7 +274,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, * Wait until the buffer is idle. 
*/ -int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, +int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals, int no_wait) { int ret; @@ -286,11 +286,10 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, drm_fence_usage_deref_unlocked(&bo->fence); return 0; } - if (no_wait) { + if (no_wait) return -EBUSY; - } - ret = - drm_fence_object_wait(bo->fence, lazy, ignore_signals, + + ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals, bo->fence_type); if (ret) return ret; @@ -301,7 +300,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, } EXPORT_SYMBOL(drm_bo_wait); -static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors) +static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors) { struct drm_device *dev = bo->dev; struct drm_buffer_manager *bm = &dev->bm; @@ -336,7 +335,7 @@ static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors) * fence object and removing from lru lists and memory managers. */ -static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all) +static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all) { struct drm_device *dev = bo->dev; struct drm_buffer_manager *bm = &dev->bm; @@ -358,9 +357,8 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all) mutex_lock(&dev->struct_mutex); - if (!atomic_dec_and_test(&bo->usage)) { + if (!atomic_dec_and_test(&bo->usage)) goto out; - } if (!bo->fence) { list_del_init(&bo->lru); @@ -388,7 +386,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all) ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); } - out: +out: mutex_unlock(&bo->mutex); return; } @@ -398,7 +396,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all) * to the buffer object. Then destroy it. 
*/ -static void drm_bo_destroy_locked(struct drm_buffer_object * bo) +static void drm_bo_destroy_locked(struct drm_buffer_object *bo) { struct drm_device *dev = bo->dev; struct drm_buffer_manager *bm = &dev->bm; @@ -427,7 +425,6 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo) atomic_dec(&bm->count); - // BUG_ON(!list_empty(&bo->base.list)); drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); return; @@ -447,7 +444,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo) * Call dev->struct_mutex locked. */ -static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all) +static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all) { struct drm_buffer_manager *bm = &dev->bm; @@ -466,9 +463,8 @@ static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all) drm_bo_cleanup_refs(entry, remove_all); - if (nentry) { + if (nentry) atomic_dec(&nentry->usage); - } } } @@ -502,21 +498,20 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo) +void drm_bo_usage_deref_locked(struct drm_buffer_object **bo) { - struct drm_buffer_object *tmp_bo = *bo; + struct drm_buffer_object *tmp_bo = *bo; bo = NULL; DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex); - if (atomic_dec_and_test(&tmp_bo->usage)) { + if (atomic_dec_and_test(&tmp_bo->usage)) drm_bo_destroy_locked(tmp_bo); - } } EXPORT_SYMBOL(drm_bo_usage_deref_locked); -static void drm_bo_base_deref_locked(struct drm_file * file_priv, - struct drm_user_object * uo) +static void drm_bo_base_deref_locked(struct drm_file *file_priv, + struct drm_user_object *uo) { struct drm_buffer_object *bo = drm_user_object_entry(uo, struct drm_buffer_object, base); @@ -527,7 +522,7 @@ static void drm_bo_base_deref_locked(struct drm_file * file_priv, drm_bo_usage_deref_locked(&bo); } -void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) +void 
drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo) { struct drm_buffer_object *tmp_bo = *bo; struct drm_device *dev = tmp_bo->dev; @@ -583,8 +578,8 @@ EXPORT_SYMBOL(drm_putback_buffer_objects); int drm_fence_buffer_objects(struct drm_device *dev, struct list_head *list, uint32_t fence_flags, - struct drm_fence_object * fence, - struct drm_fence_object ** used_fence) + struct drm_fence_object *fence, + struct drm_fence_object **used_fence) { struct drm_buffer_manager *bm = &dev->bm; struct drm_buffer_object *entry; @@ -668,7 +663,7 @@ int drm_fence_buffer_objects(struct drm_device *dev, l = list->next; } DRM_DEBUG("Fenced %d buffers\n", count); - out: +out: mutex_unlock(&dev->struct_mutex); *used_fence = fence; return ret; @@ -679,7 +674,7 @@ EXPORT_SYMBOL(drm_fence_buffer_objects); * bo->mutex locked */ -static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, +static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type, int no_wait) { int ret = 0; @@ -687,7 +682,8 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, struct drm_bo_mem_reg evict_mem; /* - * Someone might have modified the buffer before we took the buffer mutex. + * Someone might have modified the buffer before we took the + * buffer mutex. */ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) @@ -738,7 +734,7 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, _DRM_BO_FLAG_EVICTED); - out: +out: return ret; } @@ -746,8 +742,8 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. 
*/ -static int drm_bo_mem_force_space(struct drm_device * dev, - struct drm_bo_mem_reg * mem, +static int drm_bo_mem_force_space(struct drm_device *dev, + struct drm_bo_mem_reg *mem, uint32_t mem_type, int no_wait) { struct drm_mm_node *node; @@ -795,10 +791,10 @@ static int drm_bo_mem_force_space(struct drm_device * dev, return 0; } -static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, +static int drm_bo_mt_compatible(struct drm_mem_type_manager *man, int disallow_fixed, uint32_t mem_type, - uint64_t mask, uint32_t * res_mask) + uint64_t mask, uint32_t *res_mask) { uint64_t cur_flags = drm_bo_type_flags(mem_type); uint64_t flag_diff; @@ -831,7 +827,7 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && ((mask & DRM_BO_FLAG_MAPPABLE) || - (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) ) + (mask & DRM_BO_FLAG_FORCE_MAPPABLE))) return 0; *res_mask = cur_flags; @@ -846,8 +842,8 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, * drm_bo_mem_force_space is attempted in priority order to evict and find * space. */ -int drm_bo_mem_space(struct drm_buffer_object * bo, - struct drm_bo_mem_reg * mem, int no_wait) +int drm_bo_mem_space(struct drm_buffer_object *bo, + struct drm_bo_mem_reg *mem, int no_wait) { struct drm_device *dev = bo->dev; struct drm_buffer_manager *bm = &dev->bm; @@ -941,10 +937,9 @@ int drm_bo_mem_space(struct drm_buffer_object * bo, ret = (has_eagain) ? 
-EAGAIN : -ENOMEM; return ret; } - EXPORT_SYMBOL(drm_bo_mem_space); -static int drm_bo_new_mask(struct drm_buffer_object * bo, +static int drm_bo_new_mask(struct drm_buffer_object *bo, uint64_t new_flags, uint64_t used_mask) { uint32_t new_props; @@ -957,15 +952,12 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo, } if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { - DRM_ERROR - ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " - "processes.\n"); + DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n"); return -EPERM; } if ((new_flags & DRM_BO_FLAG_NO_MOVE)) { - DRM_ERROR - ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n"); + DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n"); return -EPERM; } @@ -1015,7 +1007,7 @@ EXPORT_SYMBOL(drm_lookup_buffer_object); * Doesn't do any fence flushing as opposed to the drm_bo_busy function. */ -static int drm_bo_quick_busy(struct drm_buffer_object * bo) +static int drm_bo_quick_busy(struct drm_buffer_object *bo) { struct drm_fence_object *fence = bo->fence; @@ -1035,7 +1027,7 @@ static int drm_bo_quick_busy(struct drm_buffer_object * bo) * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. */ -static int drm_bo_busy(struct drm_buffer_object * bo) +static int drm_bo_busy(struct drm_buffer_object *bo) { struct drm_fence_object *fence = bo->fence; @@ -1055,7 +1047,7 @@ static int drm_bo_busy(struct drm_buffer_object * bo) return 0; } -static int drm_bo_evict_cached(struct drm_buffer_object * bo) +static int drm_bo_evict_cached(struct drm_buffer_object *bo) { int ret = 0; @@ -1069,7 +1061,7 @@ static int drm_bo_evict_cached(struct drm_buffer_object * bo) * Wait until a buffer is unmapped. 
*/ -static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait) +static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait) { int ret = 0; @@ -1085,7 +1077,7 @@ static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait) return ret; } -static int drm_bo_check_unfenced(struct drm_buffer_object * bo) +static int drm_bo_check_unfenced(struct drm_buffer_object *bo) { int ret; @@ -1100,7 +1092,7 @@ static int drm_bo_check_unfenced(struct drm_buffer_object * bo) * Until then, we cannot really do anything with it except delete it. */ -static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, +static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait, int eagain_if_wait) { int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); @@ -1133,7 +1125,7 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, * Bo locked. */ -static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo, +static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo, struct drm_bo_info_rep *rep) { if (!rep) @@ -1237,7 +1229,7 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle, } else drm_bo_fill_rep_arg(bo, rep); - out: +out: mutex_unlock(&bo->mutex); drm_bo_usage_deref_unlocked(&bo); return ret; @@ -1266,7 +1258,7 @@ static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle) drm_remove_ref_object(file_priv, ro); drm_bo_usage_deref_locked(&bo); - out: +out: mutex_unlock(&dev->struct_mutex); return ret; } @@ -1276,7 +1268,7 @@ static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle) */ static void drm_buffer_user_object_unmap(struct drm_file *file_priv, - struct drm_user_object * uo, + struct drm_user_object *uo, enum drm_ref_type action) { struct drm_buffer_object *bo = @@ -1298,7 +1290,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv, * Note that new_mem_flags are NOT transferred to the bo->mem.mask. 
*/ -int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags, +int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags, int no_wait, int move_unfenced) { struct drm_device *dev = bo->dev; @@ -1338,7 +1330,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags, ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); - out_unlock: +out_unlock: if (ret || !move_unfenced) { mutex_lock(&dev->struct_mutex); if (mem.mm_node) { @@ -1353,7 +1345,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags, return ret; } -static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) +static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem) { uint32_t flag_diff = (mem->mask ^ mem->flags); @@ -1361,9 +1353,9 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) return 0; if ((flag_diff & DRM_BO_FLAG_CACHED) && (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/ - (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) { - return 0; - } + (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) + return 0; + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && ((mem->mask & DRM_BO_FLAG_MAPPABLE) || (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))) @@ -1375,7 +1367,7 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) * bo locked. 
*/ -static int drm_buffer_object_validate(struct drm_buffer_object * bo, +static int drm_buffer_object_validate(struct drm_buffer_object *bo, uint32_t fence_class, int move_unfenced, int no_wait) { @@ -1418,7 +1410,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, ret = drm_bo_wait_unmapped(bo, no_wait); if (ret) { - DRM_ERROR("Timed out waiting for buffer unmap.\n"); + DRM_ERROR("Timed out waiting for buffer unmap.\n"); return ret; } @@ -1535,12 +1527,12 @@ out: EXPORT_SYMBOL(drm_bo_do_validate); -int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, +int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, uint32_t fence_class, uint64_t flags, uint64_t mask, uint32_t hint, int use_old_fence_class, - struct drm_bo_info_rep * rep, + struct drm_bo_info_rep *rep, struct drm_buffer_object **bo_rep) { struct drm_device *dev = file_priv->head->dev; @@ -1588,9 +1580,9 @@ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); - if (!bo) { + if (!bo) return -EINVAL; - } + mutex_lock(&bo->mutex); if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) (void)drm_bo_busy(bo); @@ -1613,9 +1605,8 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle, bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); - if (!bo) { + if (!bo) return -EINVAL; - } mutex_lock(&bo->mutex); ret = drm_bo_wait_unfenced(bo, no_wait, 0); @@ -1627,7 +1618,7 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle, drm_bo_fill_rep_arg(bo, rep); - out: +out: mutex_unlock(&bo->mutex); drm_bo_usage_deref_unlocked(&bo); return ret; @@ -1640,7 +1631,7 @@ int drm_buffer_object_create(struct drm_device *dev, uint32_t hint, uint32_t page_alignment, unsigned long buffer_start, - struct drm_buffer_object ** buf_obj) + struct drm_buffer_object **buf_obj) { struct drm_buffer_manager *bm = 
&dev->bm; struct drm_buffer_object *bo; @@ -1706,7 +1697,7 @@ int drm_buffer_object_create(struct drm_device *dev, *buf_obj = bo; return 0; - out_err: +out_err: mutex_unlock(&bo->mutex); drm_bo_usage_deref_unlocked(&bo); @@ -1731,7 +1722,7 @@ static int drm_bo_add_user_object(struct drm_file *file_priv, bo->base.ref_struct_locked = NULL; bo->base.unref = drm_buffer_user_object_unmap; - out: +out: mutex_unlock(&dev->struct_mutex); return ret; } @@ -1921,7 +1912,7 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * return 0; } -static int drm_bo_leave_list(struct drm_buffer_object * bo, +static int drm_bo_leave_list(struct drm_buffer_object *bo, uint32_t mem_type, int free_pinned, int allow_errors) @@ -1967,7 +1958,7 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo, } } - out: +out: mutex_unlock(&bo->mutex); return ret; } @@ -1986,7 +1977,7 @@ static struct drm_buffer_object *drm_bo_entry(struct list_head *list, * dev->struct_mutex locked. */ -static int drm_bo_force_list_clean(struct drm_device * dev, +static int drm_bo_force_list_clean(struct drm_device *dev, struct list_head *head, unsigned mem_type, int free_pinned, @@ -2051,7 +2042,7 @@ restart: return 0; } -int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) +int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type) { struct drm_buffer_manager *bm = &dev->bm; struct drm_mem_type_manager *man = &bm->man[mem_type]; @@ -2093,7 +2084,7 @@ EXPORT_SYMBOL(drm_bo_clean_mm); *point since we have the hardware lock. 
*/ -static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) +static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type) { int ret; struct drm_buffer_manager *bm = &dev->bm; @@ -2118,7 +2109,7 @@ static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) return ret; } -int drm_bo_init_mm(struct drm_device * dev, +int drm_bo_init_mm(struct drm_device *dev, unsigned type, unsigned long p_offset, unsigned long p_size) { @@ -2169,7 +2160,7 @@ EXPORT_SYMBOL(drm_bo_init_mm); * (This may happen on X server restart). */ -int drm_bo_driver_finish(struct drm_device * dev) +int drm_bo_driver_finish(struct drm_device *dev) { struct drm_buffer_manager *bm = &dev->bm; int ret = 0; @@ -2196,23 +2187,22 @@ int drm_bo_driver_finish(struct drm_device * dev) } mutex_unlock(&dev->struct_mutex); - if (!cancel_delayed_work(&bm->wq)) { + if (!cancel_delayed_work(&bm->wq)) flush_scheduled_work(); - } + mutex_lock(&dev->struct_mutex); drm_bo_delayed_delete(dev, 1); - if (list_empty(&bm->ddestroy)) { + if (list_empty(&bm->ddestroy)) DRM_DEBUG("Delayed destroy list was clean\n"); - } - if (list_empty(&bm->man[0].lru)) { + + if (list_empty(&bm->man[0].lru)) DRM_DEBUG("Swap list was clean\n"); - } - if (list_empty(&bm->man[0].pinned)) { + + if (list_empty(&bm->man[0].pinned)) DRM_DEBUG("NO_MOVE list was clean\n"); - } - if (list_empty(&bm->unfenced)) { + + if (list_empty(&bm->unfenced)) DRM_DEBUG("Unfenced list was clean\n"); - } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) unlock_page(bm->dummy_read_page); @@ -2233,7 +2223,7 @@ out: * (This may happen on X server restart). 
*/ -int drm_bo_driver_init(struct drm_device * dev) +int drm_bo_driver_init(struct drm_device *dev) { struct drm_bo_driver *driver = dev->driver->bo_driver; struct drm_buffer_manager *bm = &dev->bm; @@ -2276,11 +2266,10 @@ int drm_bo_driver_init(struct drm_device * dev) bm->cur_pages = 0; INIT_LIST_HEAD(&bm->unfenced); INIT_LIST_HEAD(&bm->ddestroy); - out_unlock: +out_unlock: mutex_unlock(&dev->struct_mutex); return ret; } - EXPORT_SYMBOL(drm_bo_driver_init); int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -2434,7 +2423,7 @@ int drm_mm_unlock_ioctl(struct drm_device *dev, * buffer object vm functions. */ -int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem) +int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem) { struct drm_buffer_manager *bm = &dev->bm; struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; @@ -2451,7 +2440,6 @@ int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem) } return 1; } - EXPORT_SYMBOL(drm_mem_reg_is_pci); /** @@ -2497,7 +2485,7 @@ int drm_bo_pci_offset(struct drm_device *dev, * Call bo->mutex locked. 
*/ -void drm_bo_unmap_virtual(struct drm_buffer_object * bo) +void drm_bo_unmap_virtual(struct drm_buffer_object *bo) { struct drm_device *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; @@ -2509,9 +2497,9 @@ void drm_bo_unmap_virtual(struct drm_buffer_object * bo) unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); } -static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo) +static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo) { - struct drm_map_list *list; + struct drm_map_list *list; drm_local_map_t *map; struct drm_device *dev = bo->dev; @@ -2539,7 +2527,7 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo) drm_bo_usage_deref_locked(&bo); } -static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo) +static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo) { struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index c9c1fdb..92c5603 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -68,12 +68,12 @@ struct drm_user_object { atomic_t refcount; int shareable; struct drm_file *owner; - void (*ref_struct_locked) (struct drm_file * priv, - struct drm_user_object * obj, + void (*ref_struct_locked) (struct drm_file *priv, + struct drm_user_object *obj, enum drm_ref_type ref_action); - void (*unref) (struct drm_file * priv, struct drm_user_object * obj, + void (*unref) (struct drm_file *priv, struct drm_user_object *obj, enum drm_ref_type unref_action); - void (*remove) (struct drm_file * priv, struct drm_user_object * obj); + void (*remove) (struct drm_file *priv, struct drm_user_object *obj); }; /* @@ -94,29 +94,29 @@ struct drm_ref_object { * Must be called with the struct_mutex held. 
*/ -extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, +extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item, int shareable); /** * Must be called with the struct_mutex held. */ -extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, +extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key); /* * Must be called with the struct_mutex held. May temporarily release it. */ -extern int drm_add_ref_object(struct drm_file * priv, - struct drm_user_object * referenced_object, +extern int drm_add_ref_object(struct drm_file *priv, + struct drm_user_object *referenced_object, enum drm_ref_type ref_action); /* * Must be called with the struct_mutex held. */ -struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, - struct drm_user_object * referenced_object, +struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, + struct drm_user_object *referenced_object, enum drm_ref_type ref_action); /* * Must be called with the struct_mutex held. @@ -125,11 +125,11 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, * This function may temporarily release the struct_mutex. 
*/ -extern void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item); -extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, +extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item); +extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token, enum drm_object_type type, - struct drm_user_object ** object); -extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, + struct drm_user_object **object); +extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token, enum drm_object_type type); /*************************************************** @@ -138,7 +138,7 @@ extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, struct drm_fence_object { struct drm_user_object base; - struct drm_device *dev; + struct drm_device *dev; atomic_t usage; /* @@ -153,7 +153,7 @@ struct drm_fence_object { uint32_t sequence; uint32_t flush_mask; uint32_t submitted_flush; - uint32_t error; + uint32_t error; }; #define _DRM_FENCE_CLASSES 8 @@ -182,40 +182,44 @@ struct drm_fence_driver { uint32_t flush_diff; uint32_t sequence_mask; int lazy_capable; - int (*has_irq) (struct drm_device * dev, uint32_t fence_class, + int (*has_irq) (struct drm_device *dev, uint32_t fence_class, uint32_t flags); - int (*emit) (struct drm_device * dev, uint32_t fence_class, uint32_t flags, - uint32_t * breadcrumb, uint32_t * native_type); - void (*poke_flush) (struct drm_device * dev, uint32_t fence_class); + int (*emit) (struct drm_device *dev, uint32_t fence_class, + uint32_t flags, uint32_t *breadcrumb, + uint32_t *native_type); + void (*poke_flush) (struct drm_device *dev, uint32_t fence_class); }; extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, - uint32_t sequence, uint32_t type, uint32_t error); + uint32_t sequence, uint32_t type, + uint32_t error); extern void drm_fence_manager_init(struct drm_device *dev); extern void 
drm_fence_manager_takedown(struct drm_device *dev); extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, uint32_t sequence); -extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type); -extern int drm_fence_object_signaled(struct drm_fence_object * fence, +extern int drm_fence_object_flush(struct drm_fence_object *fence, + uint32_t type); +extern int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t type, int flush); -extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence); -extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence); +extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence); +extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence); extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src); extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, struct drm_fence_object *src); -extern int drm_fence_object_wait(struct drm_fence_object * fence, +extern int drm_fence_object_wait(struct drm_fence_object *fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t fence_class, - struct drm_fence_object ** c_fence); -extern int drm_fence_object_emit(struct drm_fence_object * fence, + struct drm_fence_object **c_fence); +extern int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags, uint32_t class, uint32_t type); extern void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg); -extern int drm_fence_add_user_object(struct drm_file * priv, - struct drm_fence_object * fence, int shareable); +extern int drm_fence_add_user_object(struct drm_file *priv, + struct drm_fence_object *fence, + int shareable); extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -242,7 +246,7 @@ extern int 
drm_fence_buffers_ioctl(struct drm_device *dev, void *data, /* * The ttm backend GTT interface. (In our case AGP). * Any similar type of device (PCIE?) - * needs only to implement these functions to be usable with the "TTM" interface. + * needs only to implement these functions to be usable with the TTM interface. * The AGP backend implementation lives in drm_agpsupport.c * basically maps these calls to available functions in agpgart. * Each drm device driver gets an @@ -257,25 +261,25 @@ extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_ttm_backend; struct drm_ttm_backend_func { - int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend); - int (*populate) (struct drm_ttm_backend * backend, - unsigned long num_pages, struct page ** pages); - void (*clear) (struct drm_ttm_backend * backend); - int (*bind) (struct drm_ttm_backend * backend, - struct drm_bo_mem_reg * bo_mem); - int (*unbind) (struct drm_ttm_backend * backend); - void (*destroy) (struct drm_ttm_backend * backend); + int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend); + int (*populate) (struct drm_ttm_backend *backend, + unsigned long num_pages, struct page **pages); + void (*clear) (struct drm_ttm_backend *backend); + int (*bind) (struct drm_ttm_backend *backend, + struct drm_bo_mem_reg *bo_mem); + int (*unbind) (struct drm_ttm_backend *backend); + void (*destroy) (struct drm_ttm_backend *backend); }; -typedef struct drm_ttm_backend { - struct drm_device *dev; - uint32_t flags; - struct drm_ttm_backend_func *func; -} drm_ttm_backend_t; +struct drm_ttm_backend { + struct drm_device *dev; + uint32_t flags; + struct drm_ttm_backend_func *func; +}; struct drm_ttm { - struct mm_struct *user_mm; + struct mm_struct *user_mm; struct page *dummy_read_page; struct page **pages; uint32_t page_flags; @@ -295,13 +299,13 @@ struct drm_ttm { }; extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size); -extern int drm_bind_ttm(struct drm_ttm * 
ttm, struct drm_bo_mem_reg *bo_mem); -extern void drm_ttm_unbind(struct drm_ttm * ttm); -extern void drm_ttm_evict(struct drm_ttm * ttm); -extern void drm_ttm_fixup_caching(struct drm_ttm * ttm); -extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index); +extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem); +extern void drm_ttm_unbind(struct drm_ttm *ttm); +extern void drm_ttm_evict(struct drm_ttm *ttm); +extern void drm_ttm_fixup_caching(struct drm_ttm *ttm); +extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index); extern void drm_ttm_cache_flush(void); -extern int drm_ttm_populate(struct drm_ttm * ttm); +extern int drm_ttm_populate(struct drm_ttm *ttm); extern int drm_ttm_set_user(struct drm_ttm *ttm, struct task_struct *tsk, int write, @@ -310,12 +314,12 @@ extern int drm_ttm_set_user(struct drm_ttm *ttm, struct page *dummy_read_page); /* - * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, - * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called - * when the last vma exits. + * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do + * this which calls this function iff there are no vmas referencing it anymore. + * Otherwise it is called when the last vma exits. 
*/ -extern int drm_destroy_ttm(struct drm_ttm * ttm); +extern int drm_destroy_ttm(struct drm_ttm *ttm); #define DRM_FLAG_MASKED(_old, _new, _mask) {\ (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -350,8 +354,8 @@ struct drm_bo_mem_reg { uint32_t mem_type; uint64_t flags; uint64_t mask; - uint32_t desired_tile_stride; - uint32_t hw_tile_stride; + uint32_t desired_tile_stride; + uint32_t hw_tile_stride; }; enum drm_bo_type { @@ -381,8 +385,8 @@ struct drm_buffer_object { uint32_t fence_type; uint32_t fence_class; - uint32_t new_fence_type; - uint32_t new_fence_class; + uint32_t new_fence_type; + uint32_t new_fence_class; struct drm_fence_object *fence; uint32_t priv_flags; wait_queue_head_t event_queue; @@ -421,7 +425,7 @@ struct drm_mem_type_manager { struct list_head pinned; uint32_t flags; uint32_t drm_bus_maptype; - unsigned long gpu_offset; + unsigned long gpu_offset; unsigned long io_offset; unsigned long io_size; void *io_addr; @@ -443,8 +447,8 @@ struct drm_bo_lock { #define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ struct drm_buffer_manager { - struct drm_bo_lock bm_lock; - struct mutex evict_mutex; + struct drm_bo_lock bm_lock; + struct mutex evict_mutex; int nice_mode; int initialized; struct drm_file *last_to_validate; @@ -468,15 +472,15 @@ struct drm_bo_driver { uint32_t num_mem_type_prio; uint32_t num_mem_busy_prio; struct drm_ttm_backend *(*create_ttm_backend_entry) - (struct drm_device * dev); + (struct drm_device *dev); int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass, - uint32_t * type); - int (*invalidate_caches) (struct drm_device * dev, uint64_t flags); - int (*init_mem_type) (struct drm_device * dev, uint32_t type, - struct drm_mem_type_manager * man); + uint32_t *type); + int (*invalidate_caches) (struct drm_device *dev, uint64_t flags); + int (*init_mem_type) (struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man); uint32_t(*evict_mask) (struct drm_buffer_object *bo); - int (*move) (struct 
drm_buffer_object * bo, - int evict, int no_wait, struct drm_bo_mem_reg * new_mem); + int (*move) (struct drm_buffer_object *bo, + int evict, int no_wait, struct drm_bo_mem_reg *new_mem); void (*ttm_cache_flush)(struct drm_ttm *ttm); }; @@ -501,43 +505,43 @@ extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_f extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, - struct drm_bo_mem_reg * mem, + struct drm_bo_mem_reg *mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size); -extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem); +extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem); -extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo); -extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo); +extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo); +extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); extern void drm_putback_buffer_objects(struct drm_device *dev); -extern int drm_fence_buffer_objects(struct drm_device * dev, +extern int drm_fence_buffer_objects(struct drm_device *dev, struct list_head *list, uint32_t fence_flags, - struct drm_fence_object * fence, - struct drm_fence_object ** used_fence); -extern void drm_bo_add_to_lru(struct drm_buffer_object * bo); + struct drm_fence_object *fence, + struct drm_fence_object **used_fence); +extern void drm_bo_add_to_lru(struct drm_buffer_object *bo); extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, enum drm_bo_type type, uint64_t mask, uint32_t hint, uint32_t page_alignment, unsigned long buffer_start, struct drm_buffer_object **bo); -extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, +extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals, 
int no_wait); -extern int drm_bo_mem_space(struct drm_buffer_object * bo, - struct drm_bo_mem_reg * mem, int no_wait); -extern int drm_bo_move_buffer(struct drm_buffer_object * bo, +extern int drm_bo_mem_space(struct drm_buffer_object *bo, + struct drm_bo_mem_reg *mem, int no_wait); +extern int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags, int no_wait, int move_unfenced); -extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type); -extern int drm_bo_init_mm(struct drm_device * dev, unsigned type, +extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type); +extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, unsigned long p_offset, unsigned long p_size); -extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, +extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, uint32_t fence_class, uint64_t flags, uint64_t mask, uint32_t hint, int use_old_fence_class, - struct drm_bo_info_rep * rep, + struct drm_bo_info_rep *rep, struct drm_buffer_object **bo_rep); -extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv, +extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, uint32_t handle, int check_owner); extern int drm_bo_do_validate(struct drm_buffer_object *bo, @@ -551,18 +555,17 @@ extern int drm_bo_do_validate(struct drm_buffer_object *bo, * drm_bo_move.c */ -extern int drm_bo_move_ttm(struct drm_buffer_object * bo, - int evict, int no_wait, struct drm_bo_mem_reg * new_mem); -extern int drm_bo_move_memcpy(struct drm_buffer_object * bo, +extern int drm_bo_move_ttm(struct drm_buffer_object *bo, + int evict, int no_wait, + struct drm_bo_mem_reg *new_mem); +extern int drm_bo_move_memcpy(struct drm_buffer_object *bo, int evict, - int no_wait, struct drm_bo_mem_reg * new_mem); -extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, - int evict, - int no_wait, - uint32_t fence_class, - ... 
[truncated message content] |
From: <an...@ke...> - 2008-01-24 20:44:15
|
shared-core/i915_dma.c | 135 ++++++++++++++++++++++--------------------------- shared-core/i915_drm.h | 10 +-- 2 files changed, 68 insertions(+), 77 deletions(-) New commits: commit e3c42f00042ffacc7868ed608b9ecf786dcc4e4a Merge: c7ee6cc... 5b99306... Author: Eric Anholt <er...@an...> Date: Thu Jan 24 12:32:08 2008 -0800 Merge commit 'airlied/i915-ttm-cfu' This requires updated Mesa to handle the new relocation format. commit 5b9930645227d52f47b6dc85cd1aee65bb5820ad Author: Dave Airlie <ai...@re...> Date: Thu Jan 24 15:18:09 2008 +1000 i915: fix missing header when copying data from userspace diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index ed56308..287e95a 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -834,7 +834,7 @@ int i915_process_relocs(struct drm_file *file_priv, goto out; } - reloc_buf_size = reloc_count * I915_RELOC0_STRIDE * sizeof(uint32_t); + reloc_buf_size = (I915_RELOC_HEADER + (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t); reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL); if (!reloc_buf) { DRM_ERROR("Out of memory for reloc buffer\n"); commit 34b71eb45124b32377b82b4d3737537b9195b0a7 Author: Dave Airlie <ai...@re...> Date: Thu Jan 24 14:37:40 2008 +1000 i915 make relocs use copy from user Switch relocs to using copy from user and remove index and pass buffer handles in instead. 
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index a36ca37..ed56308 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -739,9 +739,15 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, unsigned index; unsigned long new_cmd_offset; u32 val; - int ret; + int ret, i; + int buf_index = -1; + + for (i = 0; i <= num_buffers; i++) + if (buffers[i].buffer) + if (reloc[2] == buffers[i].buffer->base.hash.key) + buf_index = i; - if (reloc[2] >= num_buffers) { + if (buf_index == -1) { DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]); return -EINVAL; } @@ -750,7 +756,7 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, * Short-circuit relocations that were correctly * guessed by the client */ - if (buffers[reloc[2]].presumed_offset_correct && !DRM_DEBUG_RELOCATION) + if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION) return 0; new_cmd_offset = reloc[0]; @@ -777,17 +783,17 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, relocatee->page_offset = (relocatee->offset & PAGE_MASK); } - val = buffers[reloc[2]].buffer->offset; + val = buffers[buf_index].buffer->offset; index = (reloc[0] - relocatee->page_offset) >> 2; /* add in validate */ val = val + reloc[1]; if (DRM_DEBUG_RELOCATION) { - if (buffers[reloc[2]].presumed_offset_correct && + if (buffers[buf_index].presumed_offset_correct && relocatee->data_page[index] != val) { DRM_DEBUG ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n", - reloc[0], reloc[1], reloc[2], relocatee->data_page[index], val); + reloc[0], reloc[1], buf_index, relocatee->data_page[index], val); } } relocatee->data_page[index] = val; @@ -796,94 +802,79 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, int i915_process_relocs(struct drm_file *file_priv, uint32_t buf_handle, - uint32_t *reloc_buf_handle, + uint32_t __user **reloc_user_ptr, struct i915_relocatee_info *relocatee, struct 
drm_i915_validate_buffer *buffers, uint32_t num_buffers) { - struct drm_device *dev = file_priv->head->dev; - struct drm_buffer_object *reloc_list_object; - uint32_t cur_handle = *reloc_buf_handle; - uint32_t *reloc_page; - int ret, reloc_is_iomem, reloc_stride; - uint32_t num_relocs, reloc_offset, reloc_end, reloc_page_offset, next_offset, cur_offset; - struct drm_bo_kmap_obj reloc_kmap; - - memset(&reloc_kmap, 0, sizeof(reloc_kmap)); - - mutex_lock(&dev->struct_mutex); - reloc_list_object = drm_lookup_buffer_object(file_priv, cur_handle, 1); - mutex_unlock(&dev->struct_mutex); - if (!reloc_list_object) - return -EINVAL; + int ret, reloc_stride; + uint32_t cur_offset; + uint32_t reloc_count; + uint32_t reloc_type; + uint32_t reloc_buf_size; + uint32_t *reloc_buf = NULL; + int i; - ret = drm_bo_kmap(reloc_list_object, 0, 1, &reloc_kmap); + /* do a copy from user from the user ptr */ + ret = get_user(reloc_count, *reloc_user_ptr); if (ret) { DRM_ERROR("Could not map relocation buffer.\n"); goto out; } - reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem); - num_relocs = reloc_page[0] & 0xffff; + ret = get_user(reloc_type, (*reloc_user_ptr)+1); + if (ret) { + DRM_ERROR("Could not map relocation buffer.\n"); + goto out; + } - if ((reloc_page[0] >> 16) & 0xffff) { + if (reloc_type != 0) { DRM_ERROR("Unsupported relocation type requested\n"); + ret = -EINVAL; goto out; } - /* get next relocate buffer handle */ - *reloc_buf_handle = reloc_page[1]; - reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */ - - DRM_DEBUG("num relocs is %d, next is %08X\n", num_relocs, reloc_page[1]); - - reloc_page_offset = 0; - reloc_offset = I915_RELOC_HEADER * sizeof(uint32_t); - reloc_end = reloc_offset + (num_relocs * reloc_stride); - - do { - next_offset = drm_bo_offset_end(reloc_offset, reloc_end); + reloc_buf_size = reloc_count * I915_RELOC0_STRIDE * sizeof(uint32_t); + reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL); + if 
(!reloc_buf) { + DRM_ERROR("Out of memory for reloc buffer\n"); + ret = -ENOMEM; + goto out; + } - do { - cur_offset = ((reloc_offset + reloc_page_offset) & ~PAGE_MASK) / sizeof(uint32_t); - ret = i915_apply_reloc(file_priv, num_buffers, - buffers, relocatee, &reloc_page[cur_offset]); - if (ret) - goto out; + if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) { + ret = -EFAULT; + goto out; + } - reloc_offset += reloc_stride; - } while (reloc_offset < next_offset); + /* get next relocate buffer handle */ + *reloc_user_ptr = (uint32_t *)*(unsigned long *)&reloc_buf[2]; - drm_bo_kunmap(&reloc_kmap); + reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */ - reloc_offset = next_offset; - if (reloc_offset != reloc_end) { - ret = drm_bo_kmap(reloc_list_object, reloc_offset >> PAGE_SHIFT, 1, &reloc_kmap); - if (ret) { - DRM_ERROR("Could not map relocation buffer.\n"); - goto out; - } + DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count, *reloc_user_ptr); - reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem); - reloc_page_offset = reloc_offset & ~PAGE_MASK; - } + for (i = 0; i < reloc_count; i++) { + cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE); + + ret = i915_apply_reloc(file_priv, num_buffers, buffers, + relocatee, reloc_buf + cur_offset); + if (ret) + goto out; + } - } while (reloc_offset != reloc_end); out: + + if (reloc_buf) + kfree(reloc_buf); drm_bo_kunmap(&relocatee->kmap); relocatee->data_page = NULL; - drm_bo_kunmap(&reloc_kmap); - - mutex_lock(&dev->struct_mutex); - drm_bo_usage_deref_locked(&reloc_list_object); - mutex_unlock(&dev->struct_mutex); - return ret; } static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle, - drm_handle_t buf_reloc_handle, + uint32_t __user *reloc_user_ptr, struct drm_i915_validate_buffer *buffers, uint32_t buf_count) { @@ -917,8 +908,8 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle, goto 
out_err; } - while (buf_reloc_handle) { - ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count); + while (reloc_user_ptr) { + ret = i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, &relocatee, buffers, buf_count); if (ret) { DRM_ERROR("process relocs failed\n"); break; @@ -948,8 +939,8 @@ int i915_validate_buffer_list(struct drm_file *file_priv, int ret = 0; unsigned buf_count = 0; struct drm_device *dev = file_priv->head->dev; - uint32_t buf_reloc_handle, buf_handle; - + uint32_t buf_handle; + uint32_t __user *reloc_user_ptr; do { if (buf_count >= *num_buffers) { @@ -984,10 +975,10 @@ int i915_validate_buffer_list(struct drm_file *file_priv, } buf_handle = req->bo_req.handle; - buf_reloc_handle = arg.reloc_handle; + reloc_user_ptr = (uint32_t *)(unsigned long)arg.reloc_ptr; - if (buf_reloc_handle) { - ret = i915_exec_reloc(file_priv, buf_handle, buf_reloc_handle, buffers, buf_count); + if (reloc_user_ptr) { + ret = i915_exec_reloc(file_priv, buf_handle, reloc_user_ptr, buffers, buf_count); if (ret) goto out_err; DRM_MEMORYBARRIER(); diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index cfa3f93..c8a9cb7 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -329,9 +329,9 @@ typedef struct drm_i915_hws_addr { /* * Relocation header is 4 uint32_ts - * 0 - (16-bit relocation type << 16)| 16 bit reloc count - * 1 - buffer handle for another list of relocs - * 2-3 - spare. + * 0 - 32 bit reloc count + * 1 - 32-bit relocation type + * 2-3 - 64-bit user buffer handle ptr for another list of relocs. */ #define I915_RELOC_HEADER 4 @@ -339,7 +339,7 @@ typedef struct drm_i915_hws_addr { * type 0 relocation has 4-uint32_t stride * 0 - offset into buffer * 1 - delta to add in - * 2 - index into buffer list + * 2 - buffer handle * 3 - reserved (for optimisations later). 
*/ #define I915_RELOC_TYPE_0 0 @@ -347,7 +347,7 @@ typedef struct drm_i915_hws_addr { struct drm_i915_op_arg { uint64_t next; - uint32_t reloc_handle; + uint64_t reloc_ptr; int handled; union { struct drm_bo_op_req req; |
From: <an...@ke...> - 2008-03-04 21:52:05
|
linux-core/drm_objects.h | 2 +- linux-core/drm_ttm.c | 21 +++++++++++---------- shared-core/drm.h | 21 +++++++++++++-------- 3 files changed, 25 insertions(+), 19 deletions(-) New commits: commit a6a2f2c8c491617de702dc7d62bb55cbada4d42b Author: Eric Anholt <er...@an...> Date: Tue Mar 4 13:45:41 2008 -0800 Clarify when WAIT_LAZY is relevant to users. diff --git a/shared-core/drm.h b/shared-core/drm.h index 213d3c7..663696c 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -645,6 +645,13 @@ struct drm_set_version { #define DRM_FENCE_FLAG_EMIT 0x00000001 #define DRM_FENCE_FLAG_SHAREABLE 0x00000002 +/** + * On hardware with no interrupt events for operation completion, + * indicates that the kernel should sleep while waiting for any blocking + * operation to complete rather than spinning. + * + * Has no effect otherwise. + */ #define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 #define DRM_FENCE_FLAG_NO_USER 0x00000010 @@ -794,13 +801,12 @@ struct drm_fence_arg { * with it as a result of this operation */ #define DRM_BO_HINT_DONT_FENCE 0x00000004 -/* - * Sleep while waiting for the operation to complete. - * Without this flag, the kernel will, instead, spin - * until this operation has completed. I'm not sure - * why you would ever want this, so please always - * provide DRM_BO_HINT_WAIT_LAZY to any operation - * which may block +/** + * On hardware with no interrupt events for operation completion, + * indicates that the kernel should sleep while waiting for any blocking + * operation to complete rather than spinning. + * + * Has no effect otherwise. */ #define DRM_BO_HINT_WAIT_LAZY 0x00000008 /* commit 3332a0add63162222bd9c829117cd7e30d981aa7 Author: Eric Anholt <er...@an...> Date: Wed Jan 30 19:02:56 2008 -0800 Remove unused DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS. 
diff --git a/shared-core/drm.h b/shared-core/drm.h index 6c13456..213d3c7 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -646,7 +646,6 @@ struct drm_set_version { #define DRM_FENCE_FLAG_EMIT 0x00000001 #define DRM_FENCE_FLAG_SHAREABLE 0x00000002 #define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 -#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008 #define DRM_FENCE_FLAG_NO_USER 0x00000010 /* Reserved for driver use */ commit d41846adb72ba89c94ea1164e366032b1d36bd55 Author: Eric Anholt <er...@an...> Date: Tue Mar 4 13:35:23 2008 -0800 Clarify through the names what drm_ttm_alloc_pages() and friend actually did. These are all about the page directory (pointers to pages) rather than the actual pages backing the allocation. diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 8055afe..69a5c27 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -384,7 +384,7 @@ extern int drm_ttm_destroy(struct drm_ttm *ttm); * The array of page pointers was allocated with vmalloc * instead of drm_calloc. */ -#define DRM_TTM_PAGE_VMALLOC (1 << 4) +#define DRM_TTM_PAGEDIR_VMALLOC (1 << 4) /* * This ttm is mapped from user space */ diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index a9d8733..cc80b13 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -42,11 +42,12 @@ void drm_ttm_cache_flush(void) } EXPORT_SYMBOL(drm_ttm_cache_flush); -/* - * Use kmalloc if possible. Otherwise fall back to vmalloc. +/** + * Allocates storage for pointers to the pages that back the ttm. + * + * Uses kmalloc if possible. Otherwise falls back to vmalloc. 
*/ - -static void drm_ttm_alloc_pages(struct drm_ttm *ttm) +static void drm_ttm_alloc_page_directory(struct drm_ttm *ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ttm->pages = NULL; @@ -60,19 +61,19 @@ static void drm_ttm_alloc_pages(struct drm_ttm *ttm) if (!ttm->pages) { ttm->pages = vmalloc_user(size); if (ttm->pages) - ttm->page_flags |= DRM_TTM_PAGE_VMALLOC; + ttm->page_flags |= DRM_TTM_PAGEDIR_VMALLOC; } if (!ttm->pages) drm_free_memctl(size); } -static void drm_ttm_free_pages(struct drm_ttm *ttm) +static void drm_ttm_free_page_directory(struct drm_ttm *ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); - if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) { + if (ttm->page_flags & DRM_TTM_PAGEDIR_VMALLOC) { vfree(ttm->pages); - ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC; + ttm->page_flags &= ~DRM_TTM_PAGEDIR_VMALLOC; } else { drm_free(ttm->pages, size, DRM_MEM_TTM); } @@ -215,7 +216,7 @@ int drm_ttm_destroy(struct drm_ttm *ttm) else drm_ttm_free_alloced_pages(ttm); - drm_ttm_free_pages(ttm); + drm_ttm_free_page_directory(ttm); } drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM); @@ -349,7 +350,7 @@ struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size, * Account also for AGP module memory usage. */ - drm_ttm_alloc_pages(ttm); + drm_ttm_alloc_page_directory(ttm); if (!ttm->pages) { drm_ttm_destroy(ttm); DRM_ERROR("Failed allocating page table\n"); |
From: <dar...@ke...> - 2008-03-23 16:19:15
|
linux-core/Makefile.kernel | 2 linux-core/nouveau_bo.c | 305 ++++++++++++++++++++++++++++++++++++++++++++ linux-core/nouveau_buffer.c | 299 ------------------------------------------- shared-core/nv40_graph.c | 124 ++++++----------- 4 files changed, 353 insertions(+), 377 deletions(-) New commits: commit a244d2905052d3263bdcc26b295558a354702b89 Author: Ben Skeggs <sk...@gm...> Date: Mon Mar 24 03:22:42 2008 +1100 nouveau: silence warning diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index defbe43..f012262 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -23,7 +23,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o i915_compat.o i915_execbuf.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \ - nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \ + nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/nouveau_bo.c b/linux-core/nouveau_bo.c new file mode 100644 index 0000000..7a89976 --- /dev/null +++ b/linux-core/nouveau_bo.c @@ -0,0 +1,305 @@ +/* + * Copyright 2007 Dave Airlied + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +/* + * Authors: Dave Airlied <ai...@li...> + * Ben Skeggs <dar...@ii...> + * Jeremy Kolb <jk...@br...> + */ + +#include "drmP.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" + +static struct drm_ttm_backend * +nouveau_bo_create_ttm_backend_entry(struct drm_device * dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + switch (dev_priv->gart_info.type) { + case NOUVEAU_GART_AGP: + return drm_agp_init_ttm(dev); + case NOUVEAU_GART_SGDMA: + return nouveau_sgdma_init_ttm(dev); + default: + DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type); + break; + } + + return NULL; +} + +static int +nouveau_bo_fence_type(struct drm_buffer_object *bo, + uint32_t *fclass, uint32_t *type) +{ + /* When we get called, *fclass is set to the requested fence class */ + + if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) + *type = 3; + else + *type = 1; + return 0; + +} + +static int +nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags) +{ + /* We'll do this from user space. 
*/ + return 0; +} + +static int +nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + switch (type) { + case DRM_BO_MEM_LOCAL: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CACHED; + man->drm_bus_maptype = 0; + break; + case DRM_BO_MEM_VRAM: + man->flags = _DRM_FLAG_MEMTYPE_FIXED | + _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_NEEDS_IOREMAP; + man->io_addr = NULL; + man->drm_bus_maptype = _DRM_FRAME_BUFFER; + man->io_offset = drm_get_resource_start(dev, 1); + man->io_size = drm_get_resource_len(dev, 1); + if (man->io_size > nouveau_mem_fb_amount(dev)) + man->io_size = nouveau_mem_fb_amount(dev); + break; + case DRM_BO_MEM_PRIV0: + /* Unmappable VRAM */ + man->flags = _DRM_FLAG_MEMTYPE_CMA; + man->drm_bus_maptype = 0; + break; + case DRM_BO_MEM_TT: + switch (dev_priv->gart_info.type) { + case NOUVEAU_GART_AGP: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CSELECT | + _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_AGP; + break; + case NOUVEAU_GART_SGDMA: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CSELECT | + _DRM_FLAG_MEMTYPE_CMA; + man->drm_bus_maptype = _DRM_SCATTER_GATHER; + break; + default: + DRM_ERROR("Unknown GART type: %d\n", + dev_priv->gart_info.type); + return -EINVAL; + } + + man->io_offset = dev_priv->gart_info.aper_base; + man->io_size = dev_priv->gart_info.aper_size; + man->io_addr = NULL; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +static uint64_t +nouveau_bo_evict_flags(struct drm_buffer_object *bo) +{ + switch (bo->mem.mem_type) { + case DRM_BO_MEM_LOCAL: + case DRM_BO_MEM_TT: + return DRM_BO_FLAG_MEM_LOCAL; + default: + return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; + } + return 0; +} + + +/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access + * DRM_BO_MEM_{VRAM,PRIV0,TT} directly. 
+ */ +static int +nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait, + struct drm_bo_mem_reg *new_mem) +{ + struct drm_device *dev = bo->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_drm_channel *dchan = &dev_priv->channel; + struct drm_bo_mem_reg *old_mem = &bo->mem; + uint32_t srch, dsth, page_count; + + /* Can happen during init/takedown */ + if (!dchan->chan) + return -EINVAL; + + srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; + dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; + if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) { + dchan->m2mf_dma_source = srch; + dchan->m2mf_dma_destin = dsth; + + BEGIN_RING(NvSubM2MF, + NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); + OUT_RING (dchan->m2mf_dma_source); + OUT_RING (dchan->m2mf_dma_destin); + } + + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 2047) ? 2047 : page_count; + + BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); + OUT_RING (old_mem->mm_node->start << PAGE_SHIFT); + OUT_RING (new_mem->mm_node->start << PAGE_SHIFT); + OUT_RING (PAGE_SIZE); /* src_pitch */ + OUT_RING (PAGE_SIZE); /* dst_pitch */ + OUT_RING (PAGE_SIZE); /* line_length */ + OUT_RING (line_count); + OUT_RING ((1<<8)|(1<<0)); + OUT_RING (0); + BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); + OUT_RING (0); + + page_count -= line_count; + } + + return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id, + DRM_FENCE_TYPE_EXE, 0, new_mem); +} + +/* Flip pages into the GART and move if we can. 
*/ +static int +nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait, + struct drm_bo_mem_reg *new_mem) +{ + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg tmp_mem; + int ret; + + tmp_mem = *new_mem; + tmp_mem.mm_node = NULL; + tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT | + DRM_BO_FLAG_CACHED | + DRM_BO_FLAG_FORCE_CACHING); + + ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); + + if (ret) + return ret; + + ret = drm_ttm_bind (bo->ttm, &tmp_mem); + if (ret) + goto out_cleanup; + + ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem); + if (ret) + goto out_cleanup; + + ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); + +out_cleanup: + if (tmp_mem.mm_node) { + mutex_lock(&dev->struct_mutex); + if (tmp_mem.mm_node != bo->pinned_node) + drm_mm_put_block(tmp_mem.mm_node); + tmp_mem.mm_node = NULL; + mutex_unlock(&dev->struct_mutex); + } + return ret; +} + +static int +nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait, + struct drm_bo_mem_reg *new_mem) +{ + struct drm_bo_mem_reg *old_mem = &bo->mem; + + if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { + if (old_mem->mem_type == DRM_BO_MEM_LOCAL) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); +#if 0 + if (!nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem)) +#endif + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + else + if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { +#if 0 + if (nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem)) +#endif + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + else { +// if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem)) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + + if (0) { + nouveau_bo_move_m2mf(bo, 0, 0, NULL); + nouveau_bo_move_gart(bo, 0, 0, NULL); + } + + return 0; +} + +static void +nouveau_bo_flush_ttm(struct drm_ttm *ttm) +{ +} + +static uint32_t nouveau_mem_prios[] = { + DRM_BO_MEM_PRIV0, + DRM_BO_MEM_VRAM, + DRM_BO_MEM_TT, + DRM_BO_MEM_LOCAL +}; +static uint32_t 
nouveau_busy_prios[] = { + DRM_BO_MEM_TT, + DRM_BO_MEM_PRIV0, + DRM_BO_MEM_VRAM, + DRM_BO_MEM_LOCAL +}; + +struct drm_bo_driver nouveau_bo_driver = { + .mem_type_prio = nouveau_mem_prios, + .mem_busy_prio = nouveau_busy_prios, + .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t), + .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t), + .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, + .fence_type = nouveau_bo_fence_type, + .invalidate_caches = nouveau_bo_invalidate_caches, + .init_mem_type = nouveau_bo_init_mem_type, + .evict_flags = nouveau_bo_evict_flags, + .move = nouveau_bo_move, + .ttm_cache_flush= nouveau_bo_flush_ttm, + .command_stream_barrier = NULL +}; diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_buffer.c deleted file mode 100644 index 1154931..0000000 --- a/linux-core/nouveau_buffer.c +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Copyright 2007 Dave Airlied - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -/* - * Authors: Dave Airlied <ai...@li...> - * Ben Skeggs <dar...@ii...> - * Jeremy Kolb <jk...@br...> - */ - -#include "drmP.h" -#include "nouveau_drm.h" -#include "nouveau_drv.h" -#include "nouveau_dma.h" - -static struct drm_ttm_backend * -nouveau_bo_create_ttm_backend_entry(struct drm_device * dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - switch (dev_priv->gart_info.type) { - case NOUVEAU_GART_AGP: - return drm_agp_init_ttm(dev); - case NOUVEAU_GART_SGDMA: - return nouveau_sgdma_init_ttm(dev); - default: - DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type); - break; - } - - return NULL; -} - -static int -nouveau_bo_fence_type(struct drm_buffer_object *bo, - uint32_t *fclass, uint32_t *type) -{ - /* When we get called, *fclass is set to the requested fence class */ - - if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) - *type = 3; - else - *type = 1; - return 0; - -} - -static int -nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags) -{ - /* We'll do this from user space. 
*/ - return 0; -} - -static int -nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type, - struct drm_mem_type_manager *man) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - switch (type) { - case DRM_BO_MEM_LOCAL: - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CACHED; - man->drm_bus_maptype = 0; - break; - case DRM_BO_MEM_VRAM: - man->flags = _DRM_FLAG_MEMTYPE_FIXED | - _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_NEEDS_IOREMAP; - man->io_addr = NULL; - man->drm_bus_maptype = _DRM_FRAME_BUFFER; - man->io_offset = drm_get_resource_start(dev, 1); - man->io_size = drm_get_resource_len(dev, 1); - if (man->io_size > nouveau_mem_fb_amount(dev)) - man->io_size = nouveau_mem_fb_amount(dev); - break; - case DRM_BO_MEM_PRIV0: - /* Unmappable VRAM */ - man->flags = _DRM_FLAG_MEMTYPE_CMA; - man->drm_bus_maptype = 0; - break; - case DRM_BO_MEM_TT: - switch (dev_priv->gart_info.type) { - case NOUVEAU_GART_AGP: - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CSELECT | - _DRM_FLAG_NEEDS_IOREMAP; - man->drm_bus_maptype = _DRM_AGP; - break; - case NOUVEAU_GART_SGDMA: - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CSELECT | - _DRM_FLAG_MEMTYPE_CMA; - man->drm_bus_maptype = _DRM_SCATTER_GATHER; - break; - default: - DRM_ERROR("Unknown GART type: %d\n", - dev_priv->gart_info.type); - return -EINVAL; - } - - man->io_offset = dev_priv->gart_info.aper_base; - man->io_size = dev_priv->gart_info.aper_size; - man->io_addr = NULL; - break; - default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); - return -EINVAL; - } - return 0; -} - -static uint64_t -nouveau_bo_evict_flags(struct drm_buffer_object *bo) -{ - switch (bo->mem.mem_type) { - case DRM_BO_MEM_LOCAL: - case DRM_BO_MEM_TT: - return DRM_BO_FLAG_MEM_LOCAL; - default: - return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; - } - return 0; -} - - -/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access - * DRM_BO_MEM_{VRAM,PRIV0,TT} directly. 
- */ -static int -nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait, - struct drm_bo_mem_reg *new_mem) -{ - struct drm_device *dev = bo->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_drm_channel *dchan = &dev_priv->channel; - struct drm_bo_mem_reg *old_mem = &bo->mem; - uint32_t srch, dsth, page_count; - - /* Can happen during init/takedown */ - if (!dchan->chan) - return -EINVAL; - - srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; - dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; - if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) { - dchan->m2mf_dma_source = srch; - dchan->m2mf_dma_destin = dsth; - - BEGIN_RING(NvSubM2MF, - NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); - OUT_RING (dchan->m2mf_dma_source); - OUT_RING (dchan->m2mf_dma_destin); - } - - page_count = new_mem->num_pages; - while (page_count) { - int line_count = (page_count > 2047) ? 2047 : page_count; - - BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); - OUT_RING (old_mem->mm_node->start << PAGE_SHIFT); - OUT_RING (new_mem->mm_node->start << PAGE_SHIFT); - OUT_RING (PAGE_SIZE); /* src_pitch */ - OUT_RING (PAGE_SIZE); /* dst_pitch */ - OUT_RING (PAGE_SIZE); /* line_length */ - OUT_RING (line_count); - OUT_RING ((1<<8)|(1<<0)); - OUT_RING (0); - BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); - OUT_RING (0); - - page_count -= line_count; - } - - return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id, - DRM_FENCE_TYPE_EXE, 0, new_mem); -} - -/* Flip pages into the GART and move if we can. 
*/ -static int -nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait, - struct drm_bo_mem_reg *new_mem) -{ - struct drm_device *dev = bo->dev; - struct drm_bo_mem_reg tmp_mem; - int ret; - - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT | - DRM_BO_FLAG_CACHED | - DRM_BO_FLAG_FORCE_CACHING); - - ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); - - if (ret) - return ret; - - ret = drm_ttm_bind (bo->ttm, &tmp_mem); - if (ret) - goto out_cleanup; - - ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem); - if (ret) - goto out_cleanup; - - ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); - -out_cleanup: - if (tmp_mem.mm_node) { - mutex_lock(&dev->struct_mutex); - if (tmp_mem.mm_node != bo->pinned_node) - drm_mm_put_block(tmp_mem.mm_node); - tmp_mem.mm_node = NULL; - mutex_unlock(&dev->struct_mutex); - } - return ret; -} - -static int -nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait, - struct drm_bo_mem_reg *new_mem) -{ - struct drm_bo_mem_reg *old_mem = &bo->mem; - - if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { - if (old_mem->mem_type == DRM_BO_MEM_LOCAL) - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); -#if 0 - if (!nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem)) -#endif - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } - else - if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { -#if 0 - if (nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem)) -#endif - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } - else { -// if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem)) - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } - return 0; -} - -static void -nouveau_bo_flush_ttm(struct drm_ttm *ttm) -{ -} - -static uint32_t nouveau_mem_prios[] = { - DRM_BO_MEM_PRIV0, - DRM_BO_MEM_VRAM, - DRM_BO_MEM_TT, - DRM_BO_MEM_LOCAL -}; -static uint32_t nouveau_busy_prios[] = { - DRM_BO_MEM_TT, - DRM_BO_MEM_PRIV0, - DRM_BO_MEM_VRAM, - DRM_BO_MEM_LOCAL -}; - 
-struct drm_bo_driver nouveau_bo_driver = { - .mem_type_prio = nouveau_mem_prios, - .mem_busy_prio = nouveau_busy_prios, - .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t), - .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t), - .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, - .fence_type = nouveau_bo_fence_type, - .invalidate_caches = nouveau_bo_invalidate_caches, - .init_mem_type = nouveau_bo_init_mem_type, - .evict_flags = nouveau_bo_evict_flags, - .move = nouveau_bo_move, - .ttm_cache_flush= nouveau_bo_flush_ttm, - .command_stream_barrier = NULL -}; commit 24ba0c9c3bd0f160eb0c3a820fd407998f85fd55 Author: Ben Skeggs <sk...@gm...> Date: Mon Mar 24 03:20:59 2008 +1100 nv40: voodoo - not quite. diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 3e47bba..2540fc5 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -1617,25 +1617,12 @@ nv40_graph_load_context(struct nouveau_channel *chan) return 0; } -/* Some voodoo that makes context switching work without the binary driver - * initialising the card first. - * - * It is possible to effect how the context is saved from PGRAPH into a block - * of instance memory by altering the values in these tables. This may mean - * that the context layout of each chipset is slightly different (at least - * NV40 and C51 are different). It would also be possible for chipsets to - * have an identical context layout, but pull the data from different PGRAPH - * registers. - * - * TODO: decode the meaning of the magic values, may provide clues about the - * differences between the various NV40 chipsets. - * TODO: one we have a better idea of how each chipset differs, perhaps think - * about unifying these instead of providing a separate table for each - * chip. 
- * - * mmio-trace dumps from other nv4x/g7x/c5x cards very welcome :) +/* These blocks of "magic numbers" are actually a microcode that the GPU uses + * to control how graphics contexts get saved and restored between PRAMIN + * and PGRAPH during a context switch. We're currently using values seen + * in mmio-traces of the binary driver. */ -static uint32_t nv40_ctx_voodoo[] = { +static uint32_t nv40_ctx_prog[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409406, 0x0040a268, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, @@ -1667,7 +1654,7 @@ static uint32_t nv40_ctx_voodoo[] = { ~0 }; -static uint32_t nv41_ctx_voodoo[] = { +static uint32_t nv41_ctx_prog[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, @@ -1698,7 +1685,7 @@ static uint32_t nv41_ctx_voodoo[] = { 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 }; -static uint32_t nv43_ctx_voodoo[] = { +static uint32_t nv43_ctx_prog[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, @@ -1731,7 +1718,7 @@ static uint32_t nv43_ctx_voodoo[] = { ~0 }; -static uint32_t nv44_ctx_voodoo[] = { +static uint32_t nv44_ctx_prog[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06, 0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, @@ -1764,7 +1751,7 @@ static uint32_t nv44_ctx_voodoo[] = { 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 }; -static uint32_t nv46_ctx_voodoo[] = { +static uint32_t nv46_ctx_prog[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 
0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, @@ -1795,7 +1782,7 @@ static uint32_t nv46_ctx_voodoo[] = { 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 }; -static uint32_t nv47_ctx_voodoo[] = { +static uint32_t nv47_ctx_prog[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409265, 0x00409606, 0x0040a368, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, @@ -1828,7 +1815,7 @@ static uint32_t nv47_ctx_voodoo[] = { }; //this is used for nv49 and nv4b -static uint32_t nv49_4b_ctx_voodoo[] ={ +static uint32_t nv49_4b_ctx_prog[] ={ 0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020, 0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000, 0x001040c5, 0x00400f26, 0x00401068, 0x0060000d, 0x0070008f, 0x0070000e, @@ -1860,7 +1847,7 @@ static uint32_t nv49_4b_ctx_voodoo[] ={ }; -static uint32_t nv4a_ctx_voodoo[] = { +static uint32_t nv4a_ctx_prog[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06, 0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, @@ -1893,7 +1880,7 @@ static uint32_t nv4a_ctx_voodoo[] = { 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 }; -static uint32_t nv4c_ctx_voodoo[] = { +static uint32_t nv4c_ctx_prog[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406, 0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042, @@ -1924,7 +1911,7 @@ static uint32_t nv4c_ctx_voodoo[] = { 0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 }; -static uint32_t nv4e_ctx_voodoo[] = { +static uint32_t nv4e_ctx_prog[] = { 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 
0x00800001, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, @@ -1971,7 +1958,7 @@ nv40_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = (struct drm_nouveau_private *)dev->dev_private; - uint32_t *ctx_voodoo; + uint32_t *ctx_prog; uint32_t vramsz, tmp; int i, j; @@ -1981,34 +1968,34 @@ nv40_graph_init(struct drm_device *dev) NV_PMC_ENABLE_PGRAPH); switch (dev_priv->chipset) { - case 0x40: ctx_voodoo = nv40_ctx_voodoo; break; + case 0x40: ctx_prog = nv40_ctx_prog; break; case 0x41: - case 0x42: ctx_voodoo = nv41_ctx_voodoo; break; - case 0x43: ctx_voodoo = nv43_ctx_voodoo; break; - case 0x44: ctx_voodoo = nv44_ctx_voodoo; break; - case 0x46: ctx_voodoo = nv46_ctx_voodoo; break; - case 0x47: ctx_voodoo = nv47_ctx_voodoo; break; - case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break; - case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break; - case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break; + case 0x42: ctx_prog = nv41_ctx_prog; break; + case 0x43: ctx_prog = nv43_ctx_prog; break; + case 0x44: ctx_prog = nv44_ctx_prog; break; + case 0x46: ctx_prog = nv46_ctx_prog; break; + case 0x47: ctx_prog = nv47_ctx_prog; break; + case 0x49: ctx_prog = nv49_4b_ctx_prog; break; + case 0x4a: ctx_prog = nv4a_ctx_prog; break; + case 0x4b: ctx_prog = nv49_4b_ctx_prog; break; case 0x4c: - case 0x67: ctx_voodoo = nv4c_ctx_voodoo; break; - case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break; + case 0x67: ctx_prog = nv4c_ctx_prog; break; + case 0x4e: ctx_prog = nv4e_ctx_prog; break; default: - DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n", - dev_priv->chipset); - ctx_voodoo = NULL; + DRM_ERROR("Context program for 0x%02x unavailable\n", + dev_priv->chipset); + ctx_prog = NULL; break; } - /* Load the context voodoo onto the card */ - if (ctx_voodoo) { - DRM_DEBUG("Loading context-switch voodoo\n"); + /* Load the context program onto the card */ + if (ctx_prog) { + 
DRM_DEBUG("Loading context program\n"); i = 0; NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); - while (ctx_voodoo[i] != ~0) { - NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_voodoo[i]); + while (ctx_prog[i] != ~0) { + NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_prog[i]); i++; } } commit 6f4b3de284e93e8fdb133f0aadfc86d298f45916 Author: Ben Skeggs <sk...@gm...> Date: Mon Mar 24 03:13:05 2008 +1100 nv40: allocate massive amount of PRAMIN for grctx on all chipsets. More or less a workaround for issues on some chipsets where a context switch results in critical data in PRAMIN being overwritten by the GPU. The correct fix is known, but may take some time before it's a feasible option. diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 6ef02bf..3e47bba 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -28,22 +28,6 @@ #include "drm.h" #include "nouveau_drv.h" -/* The sizes are taken from the difference between the start of two - * grctx addresses while running the nvidia driver. Probably slightly - * larger than they actually are, because of other objects being created - * between the contexts - */ -#define NV40_GRCTX_SIZE (175*1024) -#define NV41_GRCTX_SIZE (92*1024) -#define NV43_GRCTX_SIZE (70*1024) -#define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */ -#define NV47_GRCTX_SIZE (125*1024) -#define NV49_GRCTX_SIZE (164640) -#define NV4A_GRCTX_SIZE (64*1024) -#define NV4B_GRCTX_SIZE (164640) -#define NV4C_GRCTX_SIZE (25*1024) -#define NV4E_GRCTX_SIZE (25*1024) - /*TODO: deciper what each offset in the context represents. The below * contexts are taken from dumps just after the 3D object is * created. 
@@ -1471,61 +1455,60 @@ nv40_graph_create_context(struct nouveau_channel *chan) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); - unsigned int ctx_size; int ret; + /* These functions populate the graphics context with a whole heap + * of default state. All these functions are very similar, with + * a minimal amount of chipset-specific changes. However, as we're + * currently dependant on the context programs used by the NVIDIA + * binary driver these functions must match the layout expected by + * them. Hopefully at some point this will all change. + */ switch (dev_priv->chipset) { case 0x40: - ctx_size = NV40_GRCTX_SIZE; ctx_init = nv40_graph_context_init; break; case 0x41: case 0x42: - ctx_size = NV41_GRCTX_SIZE; ctx_init = nv41_graph_context_init; break; case 0x43: - ctx_size = NV43_GRCTX_SIZE; ctx_init = nv43_graph_context_init; break; case 0x46: - ctx_size = NV46_GRCTX_SIZE; ctx_init = nv46_graph_context_init; break; case 0x47: - DRM_INFO("NV47 warning: If your card behaves strangely, please come to the irc channel\n"); - ctx_size = NV47_GRCTX_SIZE; ctx_init = nv47_graph_context_init; break; case 0x49: - ctx_size = NV49_GRCTX_SIZE; ctx_init = nv49_graph_context_init; break; case 0x44: case 0x4a: - ctx_size = NV4A_GRCTX_SIZE; ctx_init = nv4a_graph_context_init; break; case 0x4b: - ctx_size = NV4B_GRCTX_SIZE; ctx_init = nv4b_graph_context_init; break; case 0x4c: case 0x67: - ctx_size = NV4C_GRCTX_SIZE; ctx_init = nv4c_graph_context_init; break; case 0x4e: - ctx_size = NV4E_GRCTX_SIZE; ctx_init = nv4e_graph_context_init; break; default: - ctx_size = NV40_GRCTX_SIZE; ctx_init = nv40_graph_context_init; break; } - if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, + /* Allocate a 175KiB block of PRAMIN to store the context. 
This + * is massive overkill for a lot of chipsets, but it should be safe + * until we're able to implement this properly (will happen at more + * or less the same time we're able to write our own context programs. + */ + if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16, NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx))) return ret; |
From: <th...@ke...> - 2008-04-06 08:32:24
|
linux-core/drm_memory.c | 42 +++++++++++++++++++++++------------------- linux-core/drm_objects.h | 2 +- linux-core/drm_ttm.c | 39 +++++++++++++++++++++++++++++++++++++-- linux-core/i915_buffer.c | 4 ++-- 4 files changed, 63 insertions(+), 24 deletions(-) New commits: commit c3888b97f60fbbc0b1382e5a16689eecaa2f79a5 Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> Date: Sun Apr 6 10:32:02 2008 +0200 Use clflush() when available for cache flushing. diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 1f5d6ee..9bd04ff 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -335,7 +335,7 @@ extern void drm_ttm_unbind(struct drm_ttm *ttm); extern void drm_ttm_evict(struct drm_ttm *ttm); extern void drm_ttm_fixup_caching(struct drm_ttm *ttm); extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index); -extern void drm_ttm_cache_flush(void); +extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages); extern int drm_ttm_populate(struct drm_ttm *ttm); extern int drm_ttm_set_user(struct drm_ttm *ttm, struct task_struct *tsk, diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index e991254..da202a5 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -30,13 +30,48 @@ #include "drmP.h" +#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= (2,6,24)) +static void drm_clflush_page(struct page *page) +{ + uint8_t *page_virtual; + unsigned int i; + + if (unlikely(page == NULL)) + return; + + page_virtual = kmap_atomic(page, KM_USER0); + + for (i=0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) + clflush(page_virtual + i); + + kunmap_atomic(page_virtual, KM_USER0); +} + +static void drm_ttm_cache_flush_clflush(struct page *pages[], unsigned long num_pages) +{ + unsigned long i; + + mb(); + for (i=0; i < num_pages; ++i) + drm_clflush_page(*pages++); + mb(); +} +#endif + static void drm_ttm_ipi_handler(void *null) { flush_agp_cache(); } -void drm_ttm_cache_flush(void) +void 
drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages) { + +#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= (2,6,24)) + if (cpu_has_clflush) { + drm_ttm_cache_flush_clflush(pages, num_pages); + return; + } +#endif if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0) DRM_ERROR("Timed out waiting for drm cache flush.\n"); } @@ -114,7 +149,7 @@ static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached) return 0; if (noncached) - drm_ttm_cache_flush(); + drm_ttm_cache_flush(ttm->pages, ttm->num_pages); for (i = 0; i < ttm->num_pages; ++i) { cur_page = ttm->pages + i; commit 51a0fdcf3fef5af57938d9958efd698e96d78803 Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> Date: Sun Apr 6 09:46:29 2008 +0200 [I915] Fix VRAM eviction. diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 0806747..8d991c4 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -249,10 +249,10 @@ int i915_move(struct drm_buffer_object *bo, if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { - if (0) /*i915_move_flip(bo, evict, no_wait, new_mem)*/ + if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } else { - if (0) /*i915_move_blit(bo, evict, no_wait, new_mem)*/ + if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } return 0; commit 87ae5b22e3120d205f520a99cea31743903d49a2 Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> Date: Sun Apr 6 09:33:50 2008 +0200 Fix emergency allocation accounting. 
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index 12e0141..75f5b52 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -61,35 +61,39 @@ static inline size_t drm_size_align(size_t size) int drm_alloc_memctl(size_t size) { - int ret = 0; + int ret = 0; unsigned long a_size = drm_size_align(size); - unsigned long new_used = drm_memctl.cur_used + a_size; + unsigned long new_used; spin_lock(&drm_memctl.lock); - if (unlikely(new_used > drm_memctl.high_threshold)) { - if (!DRM_SUSER(DRM_CURPROC) || - (new_used + drm_memctl.emer_used > drm_memctl.emer_threshold) || - (a_size > 2*PAGE_SIZE)) { - ret = -ENOMEM; - goto out; - } - - /* - * Allow small root-only allocations, even if the - * high threshold is exceeded. - */ - - new_used -= drm_memctl.high_threshold; - drm_memctl.emer_used += new_used; - a_size -= new_used; + new_used = drm_memctl.cur_used + a_size; + if (likely(new_used < drm_memctl.high_threshold)) { + drm_memctl.cur_used = new_used; + goto out; } - drm_memctl.cur_used += a_size; + + /* + * Allow small allocations from root-only processes to + * succeed until the emergency threshold is reached. + */ + + new_used += drm_memctl.emer_used; + if (unlikely(!DRM_SUSER(DRM_CURPROC) || + (a_size > 16*PAGE_SIZE) || + (new_used > drm_memctl.emer_threshold))) { + ret = -ENOMEM; + goto out; + } + + drm_memctl.cur_used = drm_memctl.high_threshold; + drm_memctl.emer_used = new_used - drm_memctl.high_threshold; out: spin_unlock(&drm_memctl.lock); return ret; } EXPORT_SYMBOL(drm_alloc_memctl); + void drm_free_memctl(size_t size) { unsigned long a_size = drm_size_align(size); |
From: <th...@ke...> - 2008-04-14 10:17:25
|
linux-core/drm_bo.c | 522 ++++++++++++++++++++++------------------------ linux-core/drm_bo_lock.c | 46 ++-- linux-core/drm_bo_move.c | 9 linux-core/drm_compat.c | 2 linux-core/drm_objects.h | 20 + linux-core/drm_vm.c | 7 linux-core/i915_execbuf.c | 12 - 7 files changed, 316 insertions(+), 302 deletions(-) New commits: commit c5955c652302d66719984cb5a218cb590c74ad42 Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> Date: Mon Apr 14 12:10:50 2008 +0200 Fix buffer object creation validation. BO lock fixes. diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 0853d74..6f28753 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1244,6 +1244,7 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle, mutex_unlock(&bo->mutex); drm_bo_usage_deref_unlocked(&bo); + return ret; } @@ -1541,8 +1542,9 @@ static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo, ftype, no_wait); } - if (ret && ret != -EAGAIN) + if (ret && ret != -EAGAIN) ret = drm_bo_wait(bo, 0, 1, no_wait, 1); + if (ret) return ret; } @@ -1622,6 +1624,7 @@ out: drm_bo_fill_rep_arg(bo, rep); mutex_unlock(&bo->mutex); + return ret; } EXPORT_SYMBOL(drm_bo_do_validate); @@ -1816,7 +1819,7 @@ int drm_buffer_object_create(struct drm_device *dev, } mutex_unlock(&bo->mutex); - ret = drm_bo_do_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK, + ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE, 0, NULL); if (ret) goto out_err_unlocked; diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c index 32ebfbe..08b1c6b 100644 --- a/linux-core/drm_bo_lock.c +++ b/linux-core/drm_bo_lock.c @@ -68,9 +68,7 @@ void drm_bo_init_lock(struct drm_bo_lock *lock) void drm_bo_read_unlock(struct drm_bo_lock *lock) { - if (unlikely(atomic_add_negative(-1, &lock->readers))) - BUG(); - if (atomic_read(&lock->readers) == 0) + if (atomic_dec_and_test(&lock->readers)) wake_up_all(&lock->queue); } EXPORT_SYMBOL(drm_bo_read_unlock); @@ -79,7 +77,7 @@ int 
drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible) { while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) { int ret; - + if (!interruptible) { wait_event(lock->queue, atomic_read(&lock->write_lock_pending) == 0); @@ -93,7 +91,6 @@ int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible) while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) { int ret; - if (!interruptible) { wait_event(lock->queue, atomic_read(&lock->readers) != -1); @@ -156,7 +153,8 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible, * while holding it. */ - atomic_dec(&lock->write_lock_pending); + if (atomic_dec_and_test(&lock->write_lock_pending)) + wake_up_all(&lock->queue); dev = file_priv->minor->dev; mutex_lock(&dev->struct_mutex); ret = drm_add_user_object(file_priv, &lock->base, 0); commit c9b73ef6daff75df27d17260a9fc84e68f1b21b4 Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> Date: Sun Apr 13 14:49:14 2008 +0200 Unlock the BO mutex while waiting for idle, unmapped, unfenced. Move unfenced checking into idle checking. Never time out while waiting for software events like unmapped or unfenced. diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 4ef697b..0853d74 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -275,30 +275,81 @@ out_err: /* * Call bo->mutex locked. - * Wait until the buffer is idle. + * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise. 
*/ -int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals, - int no_wait) +static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced) { - int ret; + struct drm_fence_object *fence = bo->fence; - DRM_ASSERT_LOCKED(&bo->mutex); + if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) + return -EBUSY; - if (bo->fence) { - if (drm_fence_object_signaled(bo->fence, bo->fence_type)) { + if (fence) { + if (drm_fence_object_signaled(fence, bo->fence_type)) { + drm_fence_usage_deref_unlocked(&bo->fence); + return 0; + } + drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE); + if (drm_fence_object_signaled(fence, bo->fence_type)) { drm_fence_usage_deref_unlocked(&bo->fence); return 0; } + return -EBUSY; + } + return 0; +} + +static int drm_bo_check_unfenced(struct drm_buffer_object *bo) +{ + int ret; + + mutex_lock(&bo->mutex); + ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + mutex_unlock(&bo->mutex); + return ret; +} + + +/* + * Call bo->mutex locked. + * Wait until the buffer is idle. 
+ */ + +int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible, + int no_wait, int check_unfenced) +{ + int ret; + + DRM_ASSERT_LOCKED(&bo->mutex); + while(unlikely(drm_bo_busy(bo, check_unfenced))) { if (no_wait) return -EBUSY; - ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals, - bo->fence_type); - if (ret) - return ret; + if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) { + mutex_unlock(&bo->mutex); + wait_event(bo->event_queue, !drm_bo_check_unfenced(bo)); + mutex_lock(&bo->mutex); + bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; + } + + if (bo->fence) { + struct drm_fence_object *fence; + uint32_t fence_type = bo->fence_type; + + drm_fence_reference_unlocked(&fence, bo->fence); + mutex_unlock(&bo->mutex); + + ret = drm_fence_object_wait(fence, lazy, !interruptible, + fence_type); + + drm_fence_usage_deref_unlocked(&fence); + mutex_lock(&bo->mutex); + bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; + if (ret) + return ret; + } - drm_fence_usage_deref_unlocked(&bo->fence); } return 0; } @@ -314,7 +365,7 @@ static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors) unsigned long _end = jiffies + 3 * DRM_HZ; int ret; do { - ret = drm_bo_wait(bo, 0, 1, 0); + ret = drm_bo_wait(bo, 0, 0, 0, 0); if (ret && allow_errors) return ret; @@ -690,24 +741,32 @@ static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type, * buffer mutex. 
*/ - if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) - goto out; - if (bo->mem.mem_type != mem_type) - goto out; - - ret = drm_bo_wait(bo, 0, 0, no_wait); + do { + bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; + + if (unlikely(bo->mem.flags & + (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) + goto out_unlock; + if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) + goto out_unlock; + if (unlikely(bo->mem.mem_type != mem_type)) + goto out_unlock; + ret = drm_bo_wait(bo, 0, 1, no_wait, 0); + if (ret) + goto out_unlock; - if (ret && ret != -EAGAIN) { - DRM_ERROR("Failed to expire fence before " - "buffer eviction.\n"); - goto out; - } + } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); evict_mem = bo->mem; evict_mem.mm_node = NULL; evict_mem = bo->mem; evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo); + + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->lru); + mutex_unlock(&dev->struct_mutex); + ret = drm_bo_mem_space(bo, &evict_mem, no_wait); if (ret) { @@ -725,20 +784,21 @@ static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type, goto out; } + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, + _DRM_BO_FLAG_EVICTED); + +out: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { if (evict_mem.mm_node != bo->pinned_node) drm_mm_put_block(evict_mem.mm_node); evict_mem.mm_node = NULL; } - list_del(&bo->lru); drm_bo_add_to_lru(bo); + BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); +out_unlock: mutex_unlock(&dev->struct_mutex); - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, - _DRM_BO_FLAG_EVICTED); - -out: return ret; } @@ -773,8 +833,6 @@ static int drm_bo_mem_force_space(struct drm_device *dev, atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); - ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); drm_bo_usage_deref_unlocked(&entry); @@ -1040,46 +1098,23 @@ 
EXPORT_SYMBOL(drm_lookup_buffer_object); /* * Call bo->mutex locked. - * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. + * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise. * Doesn't do any fence flushing as opposed to the drm_bo_busy function. */ -static int drm_bo_quick_busy(struct drm_buffer_object *bo) +static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced) { struct drm_fence_object *fence = bo->fence; - BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); - if (fence) { - if (drm_fence_object_signaled(fence, bo->fence_type)) { - drm_fence_usage_deref_unlocked(&bo->fence); - return 0; - } - return 1; - } - return 0; -} - -/* - * Call bo->mutex locked. - * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. - */ - -static int drm_bo_busy(struct drm_buffer_object *bo) -{ - struct drm_fence_object *fence = bo->fence; + if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) + return -EBUSY; - BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { if (drm_fence_object_signaled(fence, bo->fence_type)) { drm_fence_usage_deref_unlocked(&bo->fence); return 0; } - drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE); - if (drm_fence_object_signaled(fence, bo->fence_type)) { - drm_fence_usage_deref_unlocked(&bo->fence); - return 0; - } - return 1; + return -EBUSY; } return 0; } @@ -1103,62 +1138,27 @@ static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait) { int ret = 0; - if ((atomic_read(&bo->mapped) >= 0) && no_wait) - return -EBUSY; - - DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ, - atomic_read(&bo->mapped) == -1); + if (likely(atomic_read(&bo->mapped)) == 0) + return 0; - if (ret == -EINTR) - ret = -EAGAIN; + if (unlikely(no_wait)) + return -EBUSY; - return ret; -} + do { + mutex_unlock(&bo->mutex); + ret = wait_event_interruptible(bo->event_queue, + atomic_read(&bo->mapped) == 0); + mutex_lock(&bo->mutex); + bo->priv_flags |= 
_DRM_BO_FLAG_UNLOCKED; -static int drm_bo_check_unfenced(struct drm_buffer_object *bo) -{ - int ret; + if (ret == -ERESTARTSYS) + ret = -EAGAIN; + } while((ret == 0) && atomic_read(&bo->mapped) > 0); - mutex_lock(&bo->mutex); - ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); - mutex_unlock(&bo->mutex); return ret; } /* - * Wait until a buffer, scheduled to be fenced moves off the unfenced list. - * Until then, we cannot really do anything with it except delete it. - */ - -static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait, - int eagain_if_wait) -{ - int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); - - if (ret && no_wait) - return -EBUSY; - else if (!ret) - return 0; - - ret = 0; - mutex_unlock(&bo->mutex); - DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ, - !drm_bo_check_unfenced(bo)); - mutex_lock(&bo->mutex); - if (ret == -EINTR) - return -EAGAIN; - ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); - if (ret) { - DRM_ERROR("Timeout waiting for buffer to become fenced\n"); - return -EBUSY; - } - if (eagain_if_wait) - return -EAGAIN; - - return 0; -} - -/* * Fill in the ioctl reply argument with buffer info. * Bo locked. */ @@ -1190,7 +1190,7 @@ void drm_bo_fill_rep_arg(struct drm_buffer_object *bo, rep->rep_flags = 0; rep->page_alignment = bo->mem.page_alignment; - if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) { + if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) { DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY, DRM_BO_REP_BUSY); } @@ -1221,59 +1221,27 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle, return -EINVAL; mutex_lock(&bo->mutex); - ret = drm_bo_wait_unfenced(bo, no_wait, 0); - if (ret) - goto out; - - /* - * If this returns true, we are currently unmapped. - * We need to do this test, because unmapping can - * be done without the bo->mutex held. 
- */ - - while (1) { - if (atomic_inc_and_test(&bo->mapped)) { - if (no_wait && drm_bo_busy(bo)) { - atomic_dec(&bo->mapped); - ret = -EBUSY; - goto out; - } - ret = drm_bo_wait(bo, 0, 0, no_wait); - if (ret) { - atomic_dec(&bo->mapped); - goto out; - } - - if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) - drm_bo_evict_cached(bo); - - break; - } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) { + do { + bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; - /* - * We are already mapped with different flags. - * need to wait for unmap. - */ + ret = drm_bo_wait(bo, 0, 1, no_wait, 1); - ret = drm_bo_wait_unmapped(bo, no_wait); - if (ret) - goto out; + if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) + drm_bo_evict_cached(bo); - continue; - } - break; - } + } while (bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); + atomic_inc(&bo->mapped); mutex_lock(&dev->struct_mutex); ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); mutex_unlock(&dev->struct_mutex); if (ret) { - if (atomic_add_negative(-1, &bo->mapped)) + if (atomic_dec_and_test(&bo->mapped)) wake_up_all(&bo->event_queue); } else drm_bo_fill_rep_arg(bo, rep); -out: + mutex_unlock(&bo->mutex); drm_bo_usage_deref_unlocked(&bo); return ret; @@ -1325,7 +1293,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv, BUG_ON(action != _DRM_REF_TYPE1); - if (atomic_add_negative(-1, &bo->mapped)) + if (atomic_dec_and_test(&bo->mapped)) wake_up_all(&bo->event_queue); } @@ -1341,19 +1309,8 @@ int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags, struct drm_buffer_manager *bm = &dev->bm; int ret = 0; struct drm_bo_mem_reg mem; - /* - * Flush outstanding fences. - */ - - drm_bo_busy(bo); - /* - * Wait for outstanding fences. 
- */ - - ret = drm_bo_wait(bo, 0, 0, no_wait); - if (ret) - return ret; + BUG_ON(bo->fence != NULL); mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; @@ -1439,64 +1396,14 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem) static int drm_buffer_object_validate(struct drm_buffer_object *bo, uint32_t fence_class, - int move_unfenced, int no_wait) + int move_unfenced, int no_wait, + int move_buffer) { struct drm_device *dev = bo->dev; struct drm_buffer_manager *bm = &dev->bm; - struct drm_bo_driver *driver = dev->driver->bo_driver; - uint32_t ftype; int ret; - DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n", - (unsigned long long) bo->mem.proposed_flags, - (unsigned long long) bo->mem.flags); - - ret = driver->fence_type(bo, &fence_class, &ftype); - - if (ret) { - DRM_ERROR("Driver did not support given buffer permissions\n"); - return ret; - } - - /* - * We're switching command submission mechanism, - * or cannot simply rely on the hardware serializing for us. - * - * Insert a driver-dependant barrier or wait for buffer idle. - */ - - if ((fence_class != bo->fence_class) || - ((ftype ^ bo->fence_type) & bo->fence_type)) { - - ret = -EINVAL; - if (driver->command_stream_barrier) { - ret = driver->command_stream_barrier(bo, - fence_class, - ftype, - no_wait); - } - if (ret) - ret = drm_bo_wait(bo, 0, 0, no_wait); - - if (ret) - return ret; - - } - - bo->new_fence_class = fence_class; - bo->new_fence_type = ftype; - - ret = drm_bo_wait_unmapped(bo, no_wait); - if (ret) { - DRM_ERROR("Timed out waiting for buffer unmap.\n"); - return ret; - } - - /* - * Check whether we need to move buffer. 
- */ - - if (!drm_bo_mem_compat(&bo->mem)) { + if (move_buffer) { ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait, move_unfenced); if (ret) { @@ -1580,6 +1487,82 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo, return 0; } +/* + * This function is called with bo->mutex locked, but may release it + * temporarily to wait for events. + */ + +static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo, + uint64_t flags, + uint64_t mask, + uint32_t hint, + uint32_t fence_class, + int no_wait, + int *move_buffer) +{ + struct drm_device *dev = bo->dev; + struct drm_bo_driver *driver = dev->driver->bo_driver; + uint32_t ftype; + + int ret; + + DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n", + (unsigned long long) bo->mem.proposed_flags, + (unsigned long long) bo->mem.flags); + + ret = drm_bo_modify_proposed_flags (bo, flags, mask); + if (ret) + return ret; + + ret = drm_bo_wait_unmapped(bo, no_wait); + if (ret) + return ret; + + ret = driver->fence_type(bo, &fence_class, &ftype); + + if (ret) { + DRM_ERROR("Driver did not support given buffer permissions.\n"); + return ret; + } + + /* + * We're switching command submission mechanism, + * or cannot simply rely on the hardware serializing for us. + * Insert a driver-dependant barrier or wait for buffer idle. + */ + + if ((fence_class != bo->fence_class) || + ((ftype ^ bo->fence_type) & bo->fence_type)) { + + ret = -EINVAL; + if (driver->command_stream_barrier) { + ret = driver->command_stream_barrier(bo, + fence_class, + ftype, + no_wait); + } + if (ret && ret != -EAGAIN) + ret = drm_bo_wait(bo, 0, 1, no_wait, 1); + if (ret) + return ret; + } + + bo->new_fence_class = fence_class; + bo->new_fence_type = ftype; + + /* + * Check whether we need to move buffer. 
+ */ + + *move_buffer = 0; + if (!drm_bo_mem_compat(&bo->mem)) { + *move_buffer = 1; + ret = drm_bo_wait(bo, 0, 1, no_wait, 1); + } + + return ret; +} + /** * drm_bo_do_validate: * @@ -1612,21 +1595,28 @@ int drm_bo_do_validate(struct drm_buffer_object *bo, { int ret; int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0; + int move_buffer; mutex_lock(&bo->mutex); - ret = drm_bo_wait_unfenced(bo, no_wait, 0); - if (ret) - goto out; + do { + bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; - ret = drm_bo_modify_proposed_flags (bo, flags, mask); - if (ret) - goto out; + ret = drm_bo_prepare_for_validate(bo, flags, mask, hint, + fence_class, no_wait, + &move_buffer); + if (ret) + goto out; + + } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); ret = drm_buffer_object_validate(bo, fence_class, !(hint & DRM_BO_HINT_DONT_FENCE), - no_wait); + no_wait, + move_buffer); + + BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); out: if (rep) drm_bo_fill_rep_arg(bo, rep); @@ -1657,22 +1647,19 @@ EXPORT_SYMBOL(drm_bo_do_validate); * fencing mechanism. At this point, there isn't any use of this * from the user mode code. * - * @use_old_fence_class: don't change fence class, pull it from the buffer object - * * @rep: To be stuffed with the reply from validation - * + * * @bp_rep: To be stuffed with the buffer object pointer * - * Perform drm_bo_do_validate on a buffer referenced by a user-space handle. - * Some permissions checking is done on the parameters, otherwise this - * is a thin wrapper. + * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead + * of a pointer to a buffer object. Optionally return a pointer to the buffer object. + * This is a convenience wrapper only. 
*/ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, uint64_t flags, uint64_t mask, uint32_t hint, uint32_t fence_class, - int use_old_fence_class, struct drm_bo_info_rep *rep, struct drm_buffer_object **bo_rep) { @@ -1687,17 +1674,9 @@ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, if (!bo) return -EINVAL; - if (use_old_fence_class) - fence_class = bo->fence_class; - - /* - * Only allow creator to change shared buffer mask. - */ - if (bo->base.owner != file_priv) mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); - ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep); if (!ret && bo_rep) @@ -1709,6 +1688,7 @@ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, } EXPORT_SYMBOL(drm_bo_handle_validate); + static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, struct drm_bo_info_rep *rep) { @@ -1723,8 +1703,12 @@ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, return -EINVAL; mutex_lock(&bo->mutex); - if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) - (void)drm_bo_busy(bo); + + /* + * FIXME: Quick busy here? 
+ */ + + drm_bo_busy(bo, 1); drm_bo_fill_rep_arg(bo, rep); mutex_unlock(&bo->mutex); drm_bo_usage_deref_unlocked(&bo); @@ -1748,15 +1732,11 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle, return -EINVAL; mutex_lock(&bo->mutex); - ret = drm_bo_wait_unfenced(bo, no_wait, 0); - if (ret) - goto out; - ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait); + ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1); if (ret) goto out; drm_bo_fill_rep_arg(bo, rep); - out: mutex_unlock(&bo->mutex); drm_bo_usage_deref_unlocked(&bo); @@ -1793,7 +1773,7 @@ int drm_buffer_object_create(struct drm_device *dev, mutex_lock(&bo->mutex); atomic_set(&bo->usage, 1); - atomic_set(&bo->mapped, -1); + atomic_set(&bo->mapped, 0); DRM_INIT_WAITQUEUE(&bo->event_queue); INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->pinned_lru); @@ -1835,17 +1815,18 @@ int drm_buffer_object_create(struct drm_device *dev, goto out_err; } - ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); + mutex_unlock(&bo->mutex); + ret = drm_bo_do_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK, + 0, NULL); if (ret) - goto out_err; + goto out_err_unlocked; - mutex_unlock(&bo->mutex); *buf_obj = bo; return 0; out_err: mutex_unlock(&bo->mutex); - +out_err_unlocked: drm_bo_usage_deref_unlocked(&bo); return ret; } @@ -1931,6 +1912,7 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev, struct drm_bo_map_wait_idle_arg *arg = data; struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; + struct drm_buffer_object *bo; int ret; if (!dev->bm.initialized) { @@ -1942,24 +1924,25 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev, if (ret) return ret; - /* - * validate the buffer. note that 'fence_class' will be unused - * as we pass use_old_fence_class=1 here. Note also that - * the libdrm API doesn't pass fence_class to the kernel, - * so it's a good thing it isn't used here. 
- */ - ret = drm_bo_handle_validate(file_priv, req->handle, - req->flags, - req->mask, - req->hint | DRM_BO_HINT_DONT_FENCE, - req->fence_class, 1, - rep, NULL); + mutex_lock(&dev->struct_mutex); + bo = drm_lookup_buffer_object(file_priv, req->handle, 1); + mutex_unlock(&dev->struct_mutex); + + if (!bo) + return -EINVAL; + + if (bo->base.owner != file_priv) + req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); + + ret = drm_bo_do_validate(bo, req->flags, req->mask, + req->hint | DRM_BO_HINT_DONT_FENCE, + bo->fence_class, rep); + + drm_bo_usage_deref_unlocked(&bo); (void) drm_bo_read_unlock(&dev->bm.bm_lock); - if (ret) - return ret; - return 0; + return ret; } int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 21673da..bf0e1b7 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -356,10 +356,11 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, bo->mem.mm_node != NULL)) #endif { - ret = drm_bo_wait(bo, 0, 1, 0); - if (ret) - return ret; - + if (bo->fence) { + (void) drm_fence_object_wait(bo->fence, 0, 1, + bo->fence_type); + drm_fence_usage_deref_unlocked(&bo->fence); + } drm_bo_free_old_node(bo); if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) { diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index a1f3a18..770fbc5 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -517,6 +517,14 @@ struct drm_buffer_object { #define _DRM_BO_FLAG_UNFENCED 0x00000001 #define _DRM_BO_FLAG_EVICTED 0x00000002 +/* + * This flag indicates that a flag called with bo->mutex held has + * temporarily released the buffer object mutex, (usually to wait for something). + * and thus any post-lock validation needs to be rerun. 
+ */ + +#define _DRM_BO_FLAG_UNLOCKED 0x00000004 + struct drm_mem_type_manager { int has_type; int use_type; @@ -682,8 +690,8 @@ extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, uint32_t hint, uint32_t page_alignment, unsigned long buffer_start, struct drm_buffer_object **bo); -extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals, - int no_wait); +extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible, + int no_wait, int check_unfenced); extern int drm_bo_mem_space(struct drm_buffer_object *bo, struct drm_bo_mem_reg *mem, int no_wait); extern int drm_bo_move_buffer(struct drm_buffer_object *bo, @@ -695,7 +703,7 @@ extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, int kern_init); extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, uint64_t flags, uint64_t mask, uint32_t hint, - uint32_t fence_class, int use_old_fence_class, + uint32_t fence_class, struct drm_bo_info_rep *rep, struct drm_buffer_object **bo_rep); extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index a09bcdd..cabfb8f 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -748,12 +748,14 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, return NOPFN_REFAULT; } - err = drm_bo_wait(bo, 0, 0, 0); + err = drm_bo_wait(bo, 0, 1, 0, 1); if (err) { ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; goto out_unlock; } + bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; + /* * If buffer happens to be in a non-mappable location, * move it to a mappable. 
@@ -806,6 +808,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, goto out_unlock; } out_unlock: + BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); mutex_unlock(&bo->mutex); drm_bo_read_unlock(&dev->bm.bm_lock); return ret; diff --git a/linux-core/i915_execbuf.c b/linux-core/i915_execbuf.c index 088a269..804f3ac 100644 --- a/linux-core/i915_execbuf.c +++ b/linux-core/i915_execbuf.c @@ -144,7 +144,7 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, relocatee->offset = new_cmd_offset; if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) { - ret = drm_bo_wait(relocatee->buf, 0, 0, 0); + ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0); if (ret) return ret; relocatee->idle = I915_RELOC_IDLE; @@ -355,11 +355,9 @@ static int i915_update_relocatee(struct i915_relocatee_info *relocatee, if (relocatee->idle == I915_RELOC_UNCHECKED) { preempt_enable(); - ret = mutex_lock_interruptible(&relocatee->buf->mutex); - if (unlikely(ret)) - return -EAGAIN; + mutex_lock(&relocatee->buf->mutex); - ret = drm_bo_wait(relocatee->buf, 0, 0, 1); + ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0); if (ret == 0) relocatee->idle = I915_RELOC_IDLE; else { @@ -684,7 +682,7 @@ int i915_validate_buffer_list(struct drm_file *file_priv, ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, req->bo_req.flags, req->bo_req.mask, req->bo_req.hint, - req->bo_req.fence_class, 0, + req->bo_req.fence_class, NULL, &item->buffer); if (ret) { DRM_ERROR("error on handle validate %d\n", ret); commit 65dd0e68ff0e0e354925adb7d5fffeb0ffbb485c Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> Date: Fri Apr 11 09:36:12 2008 +0200 Fix up buffer manager locking. 
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 144935d..4ef697b 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1938,7 +1938,7 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev, return -EINVAL; } - ret = drm_bo_read_lock(&dev->bm.bm_lock); + ret = drm_bo_read_lock(&dev->bm.bm_lock, 1); if (ret) return ret; @@ -2449,7 +2449,7 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ return -EINVAL; } - ret = drm_bo_write_lock(&bm->bm_lock, file_priv); + ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv); if (ret) return ret; @@ -2500,7 +2500,7 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f return -EINVAL; } - ret = drm_bo_write_lock(&bm->bm_lock, file_priv); + ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv); if (ret) return ret; @@ -2548,7 +2548,7 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ } if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { - ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv); + ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv); if (ret) return ret; } diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c index 2795384..32ebfbe 100644 --- a/linux-core/drm_bo_lock.c +++ b/linux-core/drm_bo_lock.c @@ -49,7 +49,7 @@ * unmappable regions to mappable. It's a bug to leave kernel space with the * read lock held. * - * Both read- and write lock taking is interruptible for low signal-delivery + * Both read- and write lock taking may be interruptible for low signal-delivery * latency. The locking functions will return -EAGAIN if interrupted by a * signal. 
* @@ -71,14 +71,20 @@ void drm_bo_read_unlock(struct drm_bo_lock *lock) if (unlikely(atomic_add_negative(-1, &lock->readers))) BUG(); if (atomic_read(&lock->readers) == 0) - wake_up_interruptible(&lock->queue); + wake_up_all(&lock->queue); } EXPORT_SYMBOL(drm_bo_read_unlock); -int drm_bo_read_lock(struct drm_bo_lock *lock) +int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible) { while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) { int ret; + + if (!interruptible) { + wait_event(lock->queue, + atomic_read(&lock->write_lock_pending) == 0); + continue; + } ret = wait_event_interruptible (lock->queue, atomic_read(&lock->write_lock_pending) == 0); if (ret) @@ -87,8 +93,14 @@ int drm_bo_read_lock(struct drm_bo_lock *lock) while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) { int ret; + + if (!interruptible) { + wait_event(lock->queue, + atomic_read(&lock->readers) != -1); + continue; + } ret = wait_event_interruptible - (lock->queue, atomic_add_unless(&lock->readers, 1, -1)); + (lock->queue, atomic_read(&lock->readers) != -1); if (ret) return -EAGAIN; } @@ -100,9 +112,7 @@ static int __drm_bo_write_unlock(struct drm_bo_lock *lock) { if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) return -EINVAL; - if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1)) - return -EINVAL; - wake_up_interruptible(&lock->queue); + wake_up_all(&lock->queue); return 0; } @@ -116,21 +126,26 @@ static void drm_bo_write_lock_remove(struct drm_file *file_priv, BUG_ON(ret); } -int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) +int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible, + struct drm_file *file_priv) { int ret = 0; struct drm_device *dev; - if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) - return -EINVAL; + atomic_inc(&lock->write_lock_pending); while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { + if (!interruptible) { + wait_event(lock->queue, + 
atomic_read(&lock->readers) == 0); + continue; + } ret = wait_event_interruptible - (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0); + (lock->queue, atomic_read(&lock->readers) == 0); if (ret) { - atomic_set(&lock->write_lock_pending, 0); - wake_up_interruptible(&lock->queue); + atomic_dec(&lock->write_lock_pending); + wake_up_all(&lock->queue); return -EAGAIN; } } @@ -141,6 +156,7 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) * while holding it. */ + atomic_dec(&lock->write_lock_pending); dev = file_priv->minor->dev; mutex_lock(&dev->struct_mutex); ret = drm_add_user_object(file_priv, &lock->base, 0); diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 5dabeae..23e5028 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -213,7 +213,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, unsigned long bus_size; dev = bo->dev; - while(drm_bo_read_lock(&dev->bm.bm_lock)); + drm_bo_read_lock(&dev->bm.bm_lock, 0); mutex_lock(&bo->mutex); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index c32edac..a1f3a18 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -320,6 +320,8 @@ struct drm_ttm { int destroy; uint32_t mapping_offset; struct drm_ttm_backend *be; + unsigned long highest_lomem_entry; + unsigned long lowest_himem_entry; enum { ttm_bound, ttm_evicted, @@ -798,8 +800,10 @@ extern void drm_regs_init(struct drm_reg_manager *manager, extern void drm_bo_init_lock(struct drm_bo_lock *lock); extern void drm_bo_read_unlock(struct drm_bo_lock *lock); -extern int drm_bo_read_lock(struct drm_bo_lock *lock); +extern int drm_bo_read_lock(struct drm_bo_lock *lock, + int interruptible); extern int drm_bo_write_lock(struct drm_bo_lock *lock, + int interruptible, struct drm_file *file_priv); extern int drm_bo_write_unlock(struct drm_bo_lock *lock, diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index ffda828..a09bcdd 100644 --- 
a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -738,7 +738,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, return NOPFN_SIGBUS; dev = bo->dev; - err = drm_bo_read_lock(&dev->bm.bm_lock); + err = drm_bo_read_lock(&dev->bm.bm_lock, 1); if (err) return NOPFN_REFAULT; diff --git a/linux-core/i915_execbuf.c b/linux-core/i915_execbuf.c index 729ee0c..088a269 100644 --- a/linux-core/i915_execbuf.c +++ b/linux-core/i915_execbuf.c @@ -845,7 +845,7 @@ int i915_execbuffer(struct drm_device *dev, void *data, if (exec_buf->num_buffers > dev_priv->max_validate_buffers) return -EINVAL; - ret = drm_bo_read_lock(&dev->bm.bm_lock); + ret = drm_bo_read_lock(&dev->bm.bm_lock, 1); if (ret) return ret; |
From: <rn...@ke...> - 2008-07-17 03:42:49
|
bsd-core/drmP.h | 3 +++ bsd-core/drm_drv.c | 1 + bsd-core/drm_irq.c | 45 +++++++++++++++++++++++---------------------- bsd-core/drm_lock.c | 7 +++++++ bsd-core/drm_pci.c | 5 ++++- 5 files changed, 38 insertions(+), 23 deletions(-) New commits: commit b0e4619a396f2db8c594cd0a26fd2f0ab9358095 Author: Robert Noland <rn...@2h...> Date: Wed Jul 16 23:39:25 2008 -0400 FreeBSD: Fix radeon build diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index 65d7fae..b0a23e9 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -1088,6 +1088,8 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align, dma_addr_t maxaddr); void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); +#define drm_core_ioremap_wc drm_core_ioremap + /* Inline replacements for DRM_IOREMAP macros */ static __inline__ void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) commit 74cf1f91be7f4139601624af0343e3d411190dec Author: Owain Gordon Ainsworth <og...@op...> Date: Mon Jul 7 17:23:48 2008 +0100 BSD: change drm_locked_task*() to use the same scheme as linux. The current code can sleep in an interrupt handler, that is bad. So instead if we can't grab the lock, flag it and run the tasklet on unlock. 
Signed-off-by: Robert Noland <rn...@2h...> diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index 88ea4e6..65d7fae 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -739,6 +739,7 @@ struct drm_device { struct mtx dev_lock; /* protects everything else */ #endif DRM_SPINTYPE drw_lock; + DRM_SPINTYPE tsk_lock; /* Usage Counters */ int open_count; /* Outstanding files open */ diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index 740a8b5..9bd6079 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -206,6 +206,7 @@ int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist) mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF); mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF); mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF); + mtx_init(&dev->tsk_lock, "drmtsk", NULL, MTX_DEF); #endif id_entry = drm_find_description(pci_get_vendor(dev->device), diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c index c3ecd28..a066cfc 100644 --- a/bsd-core/drm_irq.c +++ b/bsd-core/drm_irq.c @@ -578,41 +578,42 @@ static void drm_locked_task(void *context, int pending __unused) { struct drm_device *dev = context; - DRM_LOCK(); - for (;;) { - int ret; - - if (drm_lock_take(&dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) - { - dev->lock.file_priv = NULL; /* kernel owned */ - dev->lock.lock_time = jiffies; - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); - break; /* Got lock */ - } + DRM_SPINLOCK(&dev->tsk_lock); - /* Contention */ -#if defined(__FreeBSD__) && __FreeBSD_version > 500000 - ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock, - PZERO | PCATCH, "drmlk2", 0); -#else - ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH, - "drmlk2", 0); -#endif - if (ret != 0) - return; + DRM_LOCK(); /* XXX drm_lock_take() should do it's own locking */ + if (dev->locked_task_call == NULL || + drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT) == 0) { + DRM_UNLOCK(); + DRM_SPINUNLOCK(&dev->tsk_lock); + return; } + + dev->lock.file_priv = NULL; /* kernel owned */ + 
dev->lock.lock_time = jiffies; + atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); + DRM_UNLOCK(); dev->locked_task_call(dev); drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); + + dev->locked_task_call = NULL; + + DRM_SPINUNLOCK(&dev->tsk_lock); } void drm_locked_tasklet(struct drm_device *dev, void (*tasklet)(struct drm_device *dev)) { + DRM_SPINLOCK(&dev->tsk_lock); + if (dev->locked_task_call != NULL) { + DRM_SPINUNLOCK(&dev->tsk_lock); + return; + } + dev->locked_task_call = tasklet; + DRM_SPINUNLOCK(&dev->tsk_lock); taskqueue_enqueue(taskqueue_swi, &dev->locked_task); } diff --git a/bsd-core/drm_lock.c b/bsd-core/drm_lock.c index 9101dec..80ebb71 100644 --- a/bsd-core/drm_lock.c +++ b/bsd-core/drm_lock.c @@ -180,6 +180,13 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) != lock->context) return EINVAL; + DRM_SPINLOCK(&dev->tsk_lock); + if (dev->locked_task_call != NULL) { + dev->locked_task_call(dev); + dev->locked_task_call = NULL; + } + DRM_SPINUNLOCK(&dev->tsk_lock); + atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); DRM_LOCK(); commit 96580f660e5509dcf6c34de5630e3d36b156bcd5 Author: Robert Noland <rn...@wo...> Date: Mon Jun 9 08:54:53 2008 -0400 [FreeBSD] We aren't allowed to hold locks over bus_dma_tag_create or bus_dmamem_alloc. 
diff --git a/bsd-core/drm_pci.c b/bsd-core/drm_pci.c index 6b411ab..f23b2a5 100644 --- a/bsd-core/drm_pci.c +++ b/bsd-core/drm_pci.c @@ -71,6 +71,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size, return NULL; #ifdef __FreeBSD__ + DRM_UNLOCK(); ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */ maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */ NULL, NULL, /* filtfunc, filtfuncargs */ @@ -79,6 +80,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size, &dmah->tag); if (ret != 0) { free(dmah, M_DRM); + DRM_LOCK(); return NULL; } @@ -87,9 +89,10 @@ drm_pci_alloc(struct drm_device *dev, size_t size, if (ret != 0) { bus_dma_tag_destroy(dmah->tag); free(dmah, M_DRM); + DRM_LOCK(); return NULL; } - + DRM_LOCK(); ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size, drm_pci_busdma_callback, dmah, 0); if (ret != 0) { |
From: <rn...@ke...> - 2008-09-06 23:14:53
|
bsd-core/drmP.h | 10 ++++------ bsd-core/drm_auth.c | 3 ++- bsd-core/drm_bufs.c | 40 ++++++++++++++++++++-------------------- bsd-core/drm_context.c | 8 ++++---- bsd-core/drm_drv.c | 22 ++++++++++------------ bsd-core/drm_ioctl.c | 7 +++---- bsd-core/drm_irq.c | 6 +++--- bsd-core/drm_lock.c | 25 +++++++++++++------------ bsd-core/drm_scatter.c | 2 +- 9 files changed, 60 insertions(+), 63 deletions(-) New commits: commit 9ad5a6d0d73ba58ec7c2f66d0c5355185f2f68c6 Author: vehemens <veh...@ve...> Date: Sat Sep 6 15:33:47 2008 -0700 Pass lock data like linux and open. Signed-off-by: Robert Noland <rn...@2h...> diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index 7244b35..f8705e3 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -911,13 +911,11 @@ void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); int drm_ctxbitmap_next(struct drm_device *dev); /* Locking IOCTL support (drm_lock.c) */ -int drm_lock_take(__volatile__ unsigned int *lock, - unsigned int context); -int drm_lock_transfer(struct drm_device *dev, - __volatile__ unsigned int *lock, +int drm_lock_take(struct drm_lock_data *lock_data, + unsigned int context); +int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context); -int drm_lock_free(struct drm_device *dev, - __volatile__ unsigned int *lock, +int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); /* Buffer management support (drm_bufs.c) */ diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index 860b828..b40d1da 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -774,7 +774,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) if (dev->driver->reclaim_buffers_locked != NULL) dev->driver->reclaim_buffers_locked(dev, file_priv); - drm_lock_free(dev, &dev->lock.hw_lock->lock, + drm_lock_free(&dev->lock, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); /* FIXME: may require heavy-handed reset of @@ -790,8 +790,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, 
DRM_STRUCTPROC *p) retcode = EINTR; break; } - if (drm_lock_take(&dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { + if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) { dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); @@ -810,8 +809,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) } if (retcode == 0) { dev->driver->reclaim_buffers_locked(dev, file_priv); - drm_lock_free(dev, &dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT); + drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT); } } diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c index 08a0dbc..e3a4cb6 100644 --- a/bsd-core/drm_irq.c +++ b/bsd-core/drm_irq.c @@ -540,7 +540,7 @@ static void drm_locked_task(void *context, int pending __unused) DRM_LOCK(); /* XXX drm_lock_take() should do it's own locking */ if (dev->locked_task_call == NULL || - drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT) == 0) { + drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT) == 0) { DRM_UNLOCK(); DRM_SPINUNLOCK(&dev->tsk_lock); return; @@ -554,7 +554,7 @@ static void drm_locked_task(void *context, int pending __unused) dev->locked_task_call(dev); - drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); + drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT); dev->locked_task_call = NULL; diff --git a/bsd-core/drm_lock.c b/bsd-core/drm_lock.c index 631df8e..31badd3 100644 --- a/bsd-core/drm_lock.c +++ b/bsd-core/drm_lock.c @@ -70,7 +70,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) DRM_LOCK(); for (;;) { - if (drm_lock_take(&dev->lock.hw_lock->lock, lock->context)) { + if (drm_lock_take(&dev->lock, lock->context)) { dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); @@ -129,9 +129,9 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); DRM_LOCK(); - drm_lock_transfer(dev, 
&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); + drm_lock_transfer(&dev->lock, DRM_KERNEL_CONTEXT); - if (drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) { + if (drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT)) { DRM_ERROR("\n"); } DRM_UNLOCK(); @@ -139,8 +139,9 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) return 0; } -int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context) +int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) { + volatile unsigned int *lock = &lock_data->hw_lock->lock; unsigned int old, new; do { @@ -169,12 +170,12 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context) /* This takes a lock forcibly and hands it to context. Should ONLY be used inside *_unlock to give lock to kernel before calling *_dma_schedule. */ -int drm_lock_transfer(struct drm_device *dev, - __volatile__ unsigned int *lock, unsigned int context) +int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context) { + volatile unsigned int *lock = &lock_data->hw_lock->lock; unsigned int old, new; - dev->lock.file_priv = NULL; + lock_data->file_priv = NULL; do { old = *lock; new = context | _DRM_LOCK_HELD; @@ -183,12 +184,12 @@ int drm_lock_transfer(struct drm_device *dev, return 1; } -int drm_lock_free(struct drm_device *dev, - __volatile__ unsigned int *lock, unsigned int context) +int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) { + volatile unsigned int *lock = &lock_data->hw_lock->lock; unsigned int old, new; - dev->lock.file_priv = NULL; + lock_data->file_priv = NULL; do { old = *lock; new = 0; @@ -199,6 +200,6 @@ int drm_lock_free(struct drm_device *dev, context, _DRM_LOCKING_CONTEXT(old)); return 1; } - DRM_WAKEUP_INT((void *)&dev->lock.lock_queue); + DRM_WAKEUP_INT((void *)&lock_data->lock_queue); return 0; } commit b8a9cebddc7063bc0dae889dac43359c0cb9bfa5 Author: vehemens <veh...@ve...> Date: Sat Sep 6 14:19:32 2008 -0700 Move 
order to end like linux. Signed-off-by: Robert Noland <rn...@2h...> diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c index 9d2b52d..2570641 100644 --- a/bsd-core/drm_bufs.c +++ b/bsd-core/drm_bufs.c @@ -36,22 +36,6 @@ #include "drmP.h" -/* - * Compute order. Can be made faster. - */ -int drm_order(unsigned long size) -{ - int order; - unsigned long tmp; - - for (order = 0, tmp = size; tmp >>= 1; ++order); - - if (size & ~(1 << order)) - ++order; - - return order; -} - /* Allocation of PCI memory resources (framebuffer, registers, etc.) for * drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual * address for accessing them. Cleaned up at unload. @@ -1131,3 +1115,19 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) return retcode; } + +/* + * Compute order. Can be made faster. + */ +int drm_order(unsigned long size) +{ + int order; + unsigned long tmp; + + for (order = 0, tmp = size; tmp >>= 1; ++order); + + if (size & ~(1 << order)) + ++order; + + return order; +} commit 0808cf923d4a851b100d9b7022cb82f5e1f52e28 Author: vehemens <veh...@ve...> Date: Sat Sep 6 14:16:51 2008 -0700 Style white space cleanup part 2. 
Signed-off-by: Robert Noland <rn...@2h...> diff --git a/bsd-core/drm_auth.c b/bsd-core/drm_auth.c index 2f83618..455a716 100644 --- a/bsd-core/drm_auth.c +++ b/bsd-core/drm_auth.c @@ -75,7 +75,8 @@ static int drm_add_magic(struct drm_device *dev, drm_file_t *priv, hash = drm_hash_magic(magic); entry = malloc(sizeof(*entry), M_DRM, M_ZERO | M_NOWAIT); - if (!entry) return ENOMEM; + if (!entry) + return ENOMEM; entry->magic = magic; entry->priv = priv; entry->next = NULL; diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c index 94f5138..9d2b52d 100644 --- a/bsd-core/drm_bufs.c +++ b/bsd-core/drm_bufs.c @@ -611,14 +611,14 @@ static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *reque entry->seglist[entry->seg_count++] = dmah; for (i = 0; i < (1 << page_order); i++) { DRM_DEBUG("page %d @ %p\n", - dma->page_count + page_count, - (char *)dmah->vaddr + PAGE_SIZE * i); + dma->page_count + page_count, + (char *)dmah->vaddr + PAGE_SIZE * i); temp_pagelist[dma->page_count + page_count++] = (long)dmah->vaddr + PAGE_SIZE * i; } for (offset = 0; - offset + size <= total && entry->buf_count < count; - offset += alignment, ++entry->buf_count) { + offset + size <= total && entry->buf_count < count; + offset += alignment, ++entry->buf_count) { buf = &entry->buflist[entry->buf_count]; buf->idx = dma->buf_count + entry->buf_count; buf->total = alignment; diff --git a/bsd-core/drm_context.c b/bsd-core/drm_context.c index 24f34fd..bca899c 100644 --- a/bsd-core/drm_context.c +++ b/bsd-core/drm_context.c @@ -108,7 +108,7 @@ int drm_ctxbitmap_init(struct drm_device *dev) DRM_LOCK(); dev->ctx_bitmap = malloc(PAGE_SIZE, M_DRM, M_NOWAIT | M_ZERO); - if ( dev->ctx_bitmap == NULL ) { + if (dev->ctx_bitmap == NULL) { DRM_UNLOCK(); return ENOMEM; } @@ -214,9 +214,9 @@ int drm_context_switch_complete(struct drm_device *dev, int new) /* If a context switch is ever initiated when the kernel holds the lock, release that lock here. 
*/ - clear_bit(0, &dev->context_flag); + clear_bit(0, &dev->context_flag); - return 0; + return 0; } int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -229,7 +229,7 @@ int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv) bzero(&ctx, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ctx.handle = i; - if (DRM_COPY_TO_USER( &res->contexts[i], + if (DRM_COPY_TO_USER(&res->contexts[i], &ctx, sizeof(ctx))) return EFAULT; } diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index 9056c76..860b828 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -408,7 +408,7 @@ static int drm_firstopen(struct drm_device *dev) return i; } - for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) { + for (i = 0; i < DRM_HASH_SIZE; i++) { dev->magiclist[i].head = NULL; dev->magiclist[i].tail = NULL; } @@ -474,7 +474,7 @@ static int drm_lastclose(struct drm_device *dev) */ for (entry = dev->agp->memory; entry; entry = nexte) { nexte = entry->next; - if ( entry->bound ) + if (entry->bound) drm_agp_unbind_memory(entry->handle); drm_agp_free_memory(entry->handle); free(entry, M_DRM); @@ -543,7 +543,7 @@ static int drm_load(struct drm_device *dev) dev->types[5] = _DRM_STAT_UNLOCKS; for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++) - atomic_set( &dev->counts[i], 0 ); + atomic_set(&dev->counts[i], 0); if (dev->driver->load != NULL) { DRM_LOCK(); @@ -718,7 +718,7 @@ int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) retcode = drm_open_helper(kdev, flags, fmt, p, dev); if (!retcode) { - atomic_inc( &dev->counts[_DRM_STAT_OPENS] ); + atomic_inc(&dev->counts[_DRM_STAT_OPENS]); DRM_LOCK(); #ifdef __FreeBSD__ device_busy(dev->device); @@ -794,7 +794,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) DRM_KERNEL_CONTEXT)) { dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; - atomic_inc( &dev->counts[_DRM_STAT_LOCKS] ); + atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock 
*/ } /* Contention */ @@ -837,7 +837,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p) */ done: - atomic_inc( &dev->counts[_DRM_STAT_CLOSES] ); + atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); #ifdef __FreeBSD__ device_unbusy(dev->device); #endif @@ -871,7 +871,7 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, return EINVAL; } - atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] ); + atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; #ifdef __FreeBSD__ diff --git a/bsd-core/drm_ioctl.c b/bsd-core/drm_ioctl.c index 0e99661..ad6bd81 100644 --- a/bsd-core/drm_ioctl.c +++ b/bsd-core/drm_ioctl.c @@ -213,12 +213,11 @@ int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv) for (i = 0; i < dev->counters; i++) { if (dev->types[i] == _DRM_STAT_LOCK) - stats->data[i].value - = (dev->lock.hw_lock - ? dev->lock.hw_lock->lock : 0); + stats->data[i].value = + (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); else stats->data[i].value = atomic_read(&dev->counts[i]); - stats->data[i].type = dev->types[i]; + stats->data[i].type = dev->types[i]; } stats->count = dev->counters; diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c index 241908c..08a0dbc 100644 --- a/bsd-core/drm_irq.c +++ b/bsd-core/drm_irq.c @@ -103,7 +103,7 @@ static void drm_vblank_cleanup(struct drm_device *dev) /* Bail if the driver didn't call drm_vblank_init() */ if (dev->num_crtcs == 0) - return; + return; DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags); callout_stop(&dev->vblank_disable_timer); diff --git a/bsd-core/drm_lock.c b/bsd-core/drm_lock.c index fe66eca..631df8e 100644 --- a/bsd-core/drm_lock.c +++ b/bsd-core/drm_lock.c @@ -196,7 +196,7 @@ int drm_lock_free(struct drm_device *dev, if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { DRM_ERROR("%d freed heavyweight lock held by %d\n", - context, _DRM_LOCKING_CONTEXT(old)); + context, _DRM_LOCKING_CONTEXT(old)); return 1; } DRM_WAKEUP_INT((void 
*)&dev->lock.lock_queue); diff --git a/bsd-core/drm_scatter.c b/bsd-core/drm_scatter.c index 60d098c..550e6f8 100644 --- a/bsd-core/drm_scatter.c +++ b/bsd-core/drm_scatter.c @@ -123,7 +123,7 @@ int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv) if (!entry || entry->handle != request->handle) return EINVAL; - DRM_DEBUG("sg free virtual = 0x%lx\n", entry->handle); + DRM_DEBUG("sg free virtual = 0x%lx\n", entry->handle); drm_sg_cleanup(entry); |
From: <dar...@ke...> - 2008-09-17 04:41:24
|
shared-core/nouveau_object.c | 2 shared-core/nv50_graph.c | 2676 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 2676 insertions(+), 2 deletions(-) New commits: commit 4d2f1257fab84bf66bd1154a1c829317584fd5f7 Author: Ben Skeggs <sk...@gm...> Date: Wed Sep 17 15:13:27 2008 +1000 nv50: add ctxprog for chipset 0x50 diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index 86274c6..d6fa1d0 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -83,6 +83,74 @@ nv50_graph_init_regs(struct drm_device *dev) NV_WRITE(NV04_PGRAPH_DEBUG_3, (1<<2) /* HW_CONTEXT_SWITCH_ENABLED */); } +static uint32_t nv50_ctx_voodoo[] = { + 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, + 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, + 0x00700009, 0x00417e4d, 0x00401e44, 0x00401e05, 0x00401e0d, 0x00415a06, + 0x00600005, 0x004015c5, 0x00600011, 0x00401c0b, 0x0090ffff, 0x0091ffff, + 0x00200020, 0x00600008, 0x0050004c, 0x00600009, 0x00415a45, 0x0041754d, + 0x0070009d, 0x004022cf, 0x0070009f, 0x0050009f, 0x00401fc0, 0x00200080, + 0x00600008, 0x00401f4f, 0x00401fc0, 0x004025cc, 0x00700081, 0x00200000, + 0x00600006, 0x00700000, 0x00111bfc, 0x00700080, 0x00700083, 0x00200047, + 0x00600006, 0x0011020a, 0x002005c0, 0x00600007, 0x00300000, 0x00c000ff, + 0x00c800ff, 0x00416507, 0x00202627, 0x008000ff, 0x00403c8c, 0x005000cb, + 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, 0x00170202, 0x0011020a, + 0x00200032, 0x0010020d, 0x001b0242, 0x00120302, 0x00140402, 0x00180500, + 0x00130509, 0x00150550, 0x00110605, 0x001e0607, 0x00110700, 0x00110900, + 0x00110902, 0x00110a00, 0x00160b02, 0x00110b28, 0x00140b2b, 0x00110c01, + 0x00111400, 0x00111405, 0x00111407, 0x00111409, 0x0011140b, 0x002000ea, + 0x00101500, 0x0040640f, 0x0040644b, 0x00213700, 0x00600007, 0x00200440, + 0x008800ff, 0x0070008f, 0x0040648c, 0x005000cb, 0x00000000, 0x001118f8, + 0x0020002b, 0x00101a05, 0x00131c00, 0x00111c04, 0x00141c20, 0x00111c25, + 
0x00131c40, 0x00111c44, 0x00141c60, 0x00111c65, 0x00131c80, 0x00111c84, + 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00111cc4, 0x00141ce0, 0x00111ce5, + 0x00131d00, 0x00111d04, 0x00141d20, 0x00111d25, 0x00131d40, 0x00111d44, + 0x00141d60, 0x00111d65, 0x00131f00, 0x00191f40, 0x00409ee0, 0x00200217, + 0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, + 0x00122100, 0x00122103, 0x00162200, 0x0040960f, 0x0040964b, 0x00213700, + 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x0040968c, 0x005000cb, + 0x00000000, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380, + 0x0011238b, 0x00192394, 0x0040b0e1, 0x00200285, 0x00600006, 0x00200044, + 0x00102480, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500, 0x00122503, + 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, 0x00122780, + 0x0011278b, 0x00192794, 0x0040cce2, 0x002002f3, 0x00600006, 0x00200044, + 0x00102880, 0x001128c6, 0x001528c9, 0x0040c00f, 0x0040c04b, 0x00213700, + 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x0040c08c, 0x005000cb, + 0x00000000, 0x001928d0, 0x00122900, 0x00122903, 0x00162a00, 0x00122a07, + 0x00112a80, 0x00112b00, 0x00112b02, 0x00122b80, 0x00112b8b, 0x00192b94, + 0x0040dee3, 0x00200361, 0x00600006, 0x00200044, 0x00102c80, 0x00112cc6, + 0x00152cc9, 0x00192cd0, 0x00122d00, 0x00122d03, 0x00162e00, 0x00122e07, + 0x00112e80, 0x00112f00, 0x00112f02, 0x00122f80, 0x00112f8b, 0x00192f94, + 0x0040fae4, 0x002003cf, 0x00600006, 0x00200044, 0x00103080, 0x0040ec0f, + 0x0040ec4b, 0x00213700, 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, + 0x0040ec8c, 0x005000cb, 0x00000000, 0x001130c6, 0x001530c9, 0x001930d0, + 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300, + 0x00113302, 0x00123380, 0x0011338b, 0x00193394, 0x00410ce5, 0x0020043d, + 0x00600006, 0x00200044, 0x00103480, 0x001134c6, 0x001534c9, 0x001934d0, + 0x00123500, 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, + 0x00113702, 0x00123780, 0x0011378b, 0x00193794, 0x004128e6, 0x002004ab, + 
0x00600006, 0x00200044, 0x00103880, 0x00411a0f, 0x00411a4b, 0x00213700, + 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x00411a8c, 0x005000cb, + 0x00000000, 0x001138c6, 0x001538c9, 0x001938d0, 0x00123900, 0x00123903, + 0x00163a00, 0x00123a07, 0x00113a80, 0x00113b00, 0x00113b02, 0x00123b80, + 0x00113b8b, 0x00193b94, 0x00413ae7, 0x00200519, 0x00600006, 0x00200044, + 0x00103c80, 0x00113cc6, 0x00153cc9, 0x00193cd0, 0x00123d00, 0x00123d03, + 0x00163e00, 0x00123e07, 0x00113e80, 0x00113f00, 0x00113f02, 0x00123f80, + 0x00113f8b, 0x00193f94, 0x00000000, 0x0041410f, 0x005000cb, 0x00213700, + 0x00600007, 0x00200440, 0x008800ff, 0x005000cb, 0x00414487, 0x0060000a, + 0x00000000, 0x00415300, 0x007000a0, 0x00700080, 0x002005c0, 0x00600007, + 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000, 0x00200000, + 0x00600006, 0x00111bfe, 0x0041754d, 0x00700000, 0x00200000, 0x00600006, + 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081, 0x00600004, + 0x0050004a, 0x00415f88, 0x0060000b, 0x00200000, 0x00600006, 0x00700000, + 0x0041750b, 0x00111bfd, 0x00402e4d, 0x00202627, 0x008000fd, 0x005000cb, + 0x00c00002, 0x002005c0, 0x00600007, 0x0020015f, 0x00800002, 0x005000cb, + 0x00c01802, 0x002024c8, 0x00800002, 0x005000cb, 0x00403a4d, 0x0060000b, + 0x0041734d, 0x00700001, 0x00700003, 0x00417906, 0x00417a05, 0x0060000d, + 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e, 0x0070001c, + 0x0060000c, ~0 +}; + static uint32_t nv84_ctx_voodoo[] = { 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89, 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff, @@ -322,6 +390,9 @@ nv50_graph_init_ctxctl(struct drm_device *dev) DRM_DEBUG("\n"); switch (dev_priv->chipset) { + case 0x50: + voodoo = nv50_ctx_voodoo; + break; case 0x84: voodoo = nv84_ctx_voodoo; break; commit 301be1dc9be8a83f5092417573967f35766227bc Author: Ben Skeggs <sk...@gm...> Date: Wed Sep 17 15:02:54 2008 +1000 nv50: add ctxprog for chipset 0xaa diff --git 
a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index 1a88f28..86274c6 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -258,6 +258,61 @@ static uint32_t nv92_ctx_voodoo[] = { 0x0070000E, 0x0070001C, 0x0060000C, ~0 }; +static uint32_t nvaa_ctx_voodoo[] = { + 0x0070009c, 0x00300000, 0x0044f109, 0x00402d09, 0x0040e551, 0x00400a44, + 0x00400a05, 0x00400a0d, 0x0070008e, 0x0040124d, 0x0070009d, 0x0045004d, + 0x00700097, 0x00450121, 0x004446a1, 0x0044764d, 0x0044824d, 0x0070001d, + 0x00401806, 0x00600005, 0x00444445, 0x0044308b, 0x00401845, 0x0040234d, + 0x00700081, 0x00401ccf, 0x0070009f, 0x0050009f, 0x0044dc4d, 0x00700017, + 0x0040230b, 0x00447d4d, 0x00450221, 0x004456a1, 0x007000a0, 0x00700001, + 0x00700003, 0x00402706, 0x00402805, 0x0060000d, 0x00700005, 0x0070000d, + 0x00700006, 0x00700002, 0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, + 0x00000000, 0x0090ffff, 0x0091ffff, 0x0044d44d, 0x00600009, 0x0048004d, + 0x00700096, 0x00403acf, 0x0070009f, 0x0050009f, 0x0040e551, 0x004036c0, + 0x00200080, 0x00600008, 0x0040364f, 0x004036c0, 0x00403ecc, 0x00403651, + 0x00700016, 0x0048004d, 0x00600011, 0x0048004d, 0x0044364d, 0x0070008e, + 0x00700081, 0x0044704d, 0x00447d4d, 0x00700083, 0x00300000, 0x00212740, + 0x00600007, 0x00c00b01, 0x00200022, 0x00800001, 0x005000cb, 0x00c000ff, + 0x00445e4d, 0x0048004d, 0x0044ce08, 0x0044734d, 0x00448b4d, 0x00445e4d, + 0x0044e24d, 0x0044764d, 0x0044824d, 0x0048004d, 0x00700083, 0x0045034d, + 0x00a0023f, 0x00200040, 0x00600006, 0x0044fc4d, 0x00448d4d, 0x002001d0, + 0x0044b860, 0x00200280, 0x0038ffff, 0x0044cc4d, 0x00300000, 0x005000cb, + 0x00451c4d, 0x005000cb, 0x0044d007, 0x0048004d, 0x0044794d, 0x00111bfc, + 0x0048004d, 0x0044794d, 0x00111bfd, 0x0048004d, 0x0044794d, 0x00111bfe, + 0x0048004d, 0x00200000, 0x00700000, 0x00600006, 0x0048004d, 0x00200001, + 0x00600006, 0x0044fc4d, 0x0011020a, 0x0048004d, 0x00300000, 0x00c3ffff, + 0x00200000, 0x00600007, 0x00700000, 0x00200008, 0x008000ff, 0x005000cb, + 
0x0048004d, 0x00000000, 0x0048004d, 0x00000000, 0x00170202, 0x00200032, + 0x0010020d, 0x001e0242, 0x001102c0, 0x00120302, 0x00150402, 0x00180500, + 0x00130509, 0x00150550, 0x00110605, 0x00200013, 0x00100607, 0x00110700, + 0x00110900, 0x00120902, 0x00110a00, 0x00160b02, 0x00120b28, 0x00140b2b, + 0x00110c01, 0x00110d01, 0x00111400, 0x00111405, 0x00111407, 0x00111409, + 0x0011140b, 0x002000d4, 0x00101500, 0x00141a05, 0x00131a0c, 0x00131c00, + 0x00131c04, 0x00141c20, 0x00131c25, 0x00131f00, 0x00131f04, 0x00111f08, + 0x00111f0b, 0x00200015, 0x00101f40, 0x0048004d, 0x00600006, 0x00451c4d, + 0x00112020, 0x00112022, 0x00200085, 0x00102040, 0x001120c8, 0x001420ca, + 0x001b20cf, 0x00122100, 0x00122103, 0x00162140, 0x00122147, 0x00122153, + 0x001121a0, 0x001221c0, 0x001121cb, 0x001121d4, 0x001521d8, 0x0048004d, + 0x00000000, 0x0048004d, 0x0060000b, 0x0048004d, 0x0060000a, 0x0048004d, + 0x0060000b, 0x0040d24d, 0x00200020, 0x00600008, 0x0050004c, 0x0048004d, + 0x002003e8, 0x00600008, 0x0050004c, 0x0048004d, 0x00600004, 0x0050004a, + 0x0048004d, 0x00c000ff, 0x00c800ff, 0x0048004d, 0x00c000ff, 0x00c800ff, + 0x0048004d, 0x00700016, 0x0070008e, 0x00700082, 0x00500041, 0x0044d84d, + 0x00700095, 0x005000d1, 0x00600016, 0x00500052, 0x00700002, 0x00700015, + 0x0040284d, 0x0070008e, 0x0044d44d, 0x00200000, 0x00600007, 0x00300000, + 0x00c000ff, 0x00200000, 0x008000ff, 0x00700009, 0x0070000e, 0x0048004d, + 0x00700080, 0x00480017, 0x00700000, 0x0048004d, 0x0048004d, 0x0048004d, + 0x0048004d, 0x0070008e, 0x0044d44d, 0x00700083, 0x0044df4d, 0x00450c4d, + 0x0070000f, 0x00410b8c, 0x005000cb, 0x0048004d, 0x00200280, 0x00600007, + 0x00452307, 0x00451187, 0x0048004d, 0x00000000, 0x00202070, 0x0044fc4d, + 0x008000ff, 0x0048004d, 0x00210600, 0x00600007, 0x00200428, 0x0044fc4d, + 0x008800ff, 0x0048004d, 0x0048000f, 0x0048004b, 0x0045164d, 0x0070008f, + 0x0048008c, 0x005000cb, 0x0048004d, 0x00202070, 0x0044fc4d, 0x008000fd, + 0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200161, 0x0044fc4d, + 
0x00800002, 0x005000cb, 0x00c00002, 0x00201f0e, 0x0044fc4d, 0x00800002, + 0x005000cb, 0x0048004d, ~0 +} + static int nv50_graph_init_ctxctl(struct drm_device *dev) { @@ -276,6 +331,9 @@ nv50_graph_init_ctxctl(struct drm_device *dev) case 0x92: voodoo = nv92_ctx_voodoo; break; + case 0xaa: + voodoo = nvaa_ctx_voodoo; + break; default: DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset); return -EINVAL; commit f152482bdecaa16db36b20b445a480445d18048c Author: Ben Skeggs <sk...@gm...> Date: Wed Sep 17 14:49:04 2008 +1000 nv50: add support for chipset 0x92 diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 07b5a39..5a44d41 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -942,7 +942,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) /* RAMFC */ size += 0x1000; /* PGRAPH context */ - size += 0x60000; + size += 0x70000; } DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index 1300c83..1a88f28 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -187,6 +187,77 @@ static uint32_t nv86_ctx_voodoo[] = { 0x0060000c, ~0 }; +static uint32_t nv92_ctx_voodoo[] = { + 0x0070008E, 0x0070009C, 0x00200020, 0x00600008, 0x0050004C, 0x00400E89, + 0x00200000, 0x00600007, 0x00300000, 0x00C000FF, 0x00200000, 0x008000FF, + 0x00700009, 0x0041924D, 0x00402944, 0x00402905, 0x0040290D, 0x00416E06, + 0x00600005, 0x004015C5, 0x00600011, 0x0040270B, 0x004021C5, 0x00700000, + 0x00700081, 0x00600004, 0x0050004A, 0x00219600, 0x00600007, 0x00C02701, + 0x0020002E, 0x00800001, 0x005000CB, 0x0090FFFF, 0x0091FFFF, 0x00200020, + 0x00600008, 0x0050004C, 0x00600009, 0x00416E45, 0x0041894D, 0x0070009D, + 0x00402DCF, 0x0070009F, 0x0050009F, 0x00402AC0, 0x00200080, 0x00600008, + 0x00402A4F, 0x00402AC0, 0x004030CC, 0x00700081, 0x00200000, 0x00600006, + 0x00700000, 0x00111BFC, 0x00700083, 0x00300000, 0x00219600, 0x00600007, + 
0x00C00A01, 0x0020001E, 0x00800001, 0x005000CB, 0x00C000FF, 0x00700080, + 0x00700083, 0x00200047, 0x00600006, 0x0011020A, 0x00200540, 0x00600007, + 0x00300000, 0x00C000FF, 0x00C800FF, 0x00417907, 0x00202DD2, 0x008000FF, + 0x0040508C, 0x005000CB, 0x00A0023F, 0x00200040, 0x00600006, 0x0070000F, + 0x00170202, 0x0011020A, 0x00200032, 0x0010020D, 0x001C0242, 0x00120302, + 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000F, + 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110A00, 0x00160B02, + 0x00120B28, 0x00140B2B, 0x00110C01, 0x00111400, 0x00111405, 0x00111407, + 0x00111409, 0x0011140B, 0x002000CB, 0x00101500, 0x0040790F, 0x0040794B, + 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040798C, + 0x005000CB, 0x00000000, 0x00141A05, 0x00131A0C, 0x00131C00, 0x00121C04, + 0x00141C20, 0x00111C25, 0x00131C40, 0x00121C44, 0x00141C60, 0x00111C65, + 0x00131C80, 0x00121C84, 0x00141CA0, 0x00111CA5, 0x00131CC0, 0x00121CC4, + 0x00141CE0, 0x00111CE5, 0x00131F00, 0x00191F40, 0x0040A1E0, 0x002001C9, + 0x00600006, 0x00200044, 0x00102080, 0x001120C6, 0x001520C9, 0x001920D0, + 0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300, + 0x00112302, 0x00122380, 0x0011238B, 0x00112394, 0x0011239C, 0x0040BEE1, + 0x00200230, 0x00600006, 0x00200044, 0x00102480, 0x0040AF0F, 0x0040AF4B, + 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040AF8C, + 0x005000CB, 0x00000000, 0x001124C6, 0x001524C9, 0x001924D0, 0x00122500, + 0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, + 0x00122780, 0x0011278B, 0x00112794, 0x0011279C, 0x0040D1E2, 0x00200297, + 0x00600006, 0x00200044, 0x00102880, 0x001128C6, 0x001528C9, 0x001928D0, + 0x00122900, 0x00122903, 0x00162A00, 0x00122A07, 0x00112A80, 0x00112B00, + 0x00112B02, 0x00122B80, 0x00112B8B, 0x00112B94, 0x00112B9C, 0x0040EEE3, + 0x002002FE, 0x00600006, 0x00200044, 0x00102C80, 0x0040DF0F, 0x0040DF4B, + 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040DF8C, + 
0x005000CB, 0x00000000, 0x00112CC6, 0x00152CC9, 0x00192CD0, 0x00122D00, + 0x00122D03, 0x00162E00, 0x00122E07, 0x00112E80, 0x00112F00, 0x00112F02, + 0x00122F80, 0x00112F8B, 0x00112F94, 0x00112F9C, 0x004101E4, 0x00200365, + 0x00600006, 0x00200044, 0x00103080, 0x001130C6, 0x001530C9, 0x001930D0, + 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300, + 0x00113302, 0x00123380, 0x0011338B, 0x00113394, 0x0011339C, 0x00411EE5, + 0x002003CC, 0x00600006, 0x00200044, 0x00103480, 0x00410F0F, 0x00410F4B, + 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x00410F8C, + 0x005000CB, 0x00000000, 0x001134C6, 0x001534C9, 0x001934D0, 0x00123500, + 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702, + 0x00123780, 0x0011378B, 0x00113794, 0x0011379C, 0x004131E6, 0x00200433, + 0x00600006, 0x00200044, 0x00103880, 0x001138C6, 0x001538C9, 0x001938D0, + 0x00123900, 0x00123903, 0x00163A00, 0x00123A07, 0x00113A80, 0x00113B00, + 0x00113B02, 0x00123B80, 0x00113B8B, 0x00113B94, 0x00113B9C, 0x00414EE7, + 0x0020049A, 0x00600006, 0x00200044, 0x00103C80, 0x00413F0F, 0x00413F4B, + 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x00413F8C, + 0x005000CB, 0x00000000, 0x00113CC6, 0x00153CC9, 0x00193CD0, 0x00123D00, + 0x00123D03, 0x00163E00, 0x00123E07, 0x00113E80, 0x00113F00, 0x00113F02, + 0x00123F80, 0x00113F8B, 0x00113F94, 0x00113F9C, 0x00000000, 0x0041550F, + 0x005000CB, 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x005000CB, + 0x00415887, 0x0060000A, 0x00000000, 0x00416700, 0x007000A0, 0x00700080, + 0x00200540, 0x00600007, 0x00200004, 0x00C000FF, 0x008000FF, 0x005000CB, + 0x00700000, 0x00200000, 0x00600006, 0x00111BFE, 0x0041894D, 0x00700000, + 0x00200000, 0x00600006, 0x00111BFE, 0x00700080, 0x0070001D, 0x0040114D, + 0x00700081, 0x00600004, 0x0050004A, 0x00417388, 0x0060000B, 0x00200000, + 0x00600006, 0x00700000, 0x0041890B, 0x00111BFD, 0x0040424D, 0x00202DD2, + 0x008000FD, 0x005000CB, 0x00C00002, 0x00200540, 0x00600007, 0x00200160, + 
0x00800002, 0x005000CB, 0x00C01802, 0x00202C72, 0x00800002, 0x005000CB, + 0x00404E4D, 0x0060000B, 0x0041874D, 0x00700001, 0x00700003, 0x00418D06, + 0x00418E05, 0x0060000D, 0x00700005, 0x0070000D, 0x00700006, 0x0070000B, + 0x0070000E, 0x0070001C, 0x0060000C, ~0 +}; + static int nv50_graph_init_ctxctl(struct drm_device *dev) { @@ -202,6 +273,9 @@ nv50_graph_init_ctxctl(struct drm_device *dev) case 0x86: voodoo = nv86_ctx_voodoo; break; + case 0x92: + voodoo = nv92_ctx_voodoo; + break; default: DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset); return -EINVAL; @@ -2059,6 +2133,2474 @@ nv84_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) INSTANCE_WR(ctx, 0x5b700/4, 0x00000001); } +static void +nv92_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ctx = ref->gpuobj; + + INSTANCE_WR(ctx, 0x10C/4, 0x30); + INSTANCE_WR(ctx, 0x1D4/4, 0x3); + INSTANCE_WR(ctx, 0x1D8/4, 0x1000); + INSTANCE_WR(ctx, 0x218/4, 0xFE0C); + INSTANCE_WR(ctx, 0x22C/4, 0x1000); + INSTANCE_WR(ctx, 0x258/4, 0x187); + INSTANCE_WR(ctx, 0x26C/4, 0x1018); + INSTANCE_WR(ctx, 0x270/4, 0xFF); + INSTANCE_WR(ctx, 0x2AC/4, 0x4); + INSTANCE_WR(ctx, 0x2B0/4, 0x42500DF); + INSTANCE_WR(ctx, 0x2B8/4, 0x600); + INSTANCE_WR(ctx, 0x2D0/4, 0x1000000); + INSTANCE_WR(ctx, 0x2D4/4, 0xFF); + INSTANCE_WR(ctx, 0x2DC/4, 0x400); + INSTANCE_WR(ctx, 0x2F4/4, 0x1); + INSTANCE_WR(ctx, 0x2F8/4, 0x80); + INSTANCE_WR(ctx, 0x2FC/4, 0x4); + INSTANCE_WR(ctx, 0x318/4, 0x2); + INSTANCE_WR(ctx, 0x31C/4, 0x1); + INSTANCE_WR(ctx, 0x328/4, 0x1); + INSTANCE_WR(ctx, 0x32C/4, 0x100); + INSTANCE_WR(ctx, 0x344/4, 0x2); + INSTANCE_WR(ctx, 0x348/4, 0x1); + INSTANCE_WR(ctx, 0x34C/4, 0x1); + INSTANCE_WR(ctx, 0x35C/4, 0x1); + INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF); + INSTANCE_WR(ctx, 0x364/4, 0x1FFF); + INSTANCE_WR(ctx, 0x36C/4, 0x1); + INSTANCE_WR(ctx, 0x370/4, 0x1); + INSTANCE_WR(ctx, 0x378/4, 0x1); + 
INSTANCE_WR(ctx, 0x37C/4, 0x1); + INSTANCE_WR(ctx, 0x380/4, 0x1); + INSTANCE_WR(ctx, 0x384/4, 0x4); + INSTANCE_WR(ctx, 0x388/4, 0x1); + INSTANCE_WR(ctx, 0x38C/4, 0x1); + INSTANCE_WR(ctx, 0x390/4, 0x1); + INSTANCE_WR(ctx, 0x394/4, 0x7); + INSTANCE_WR(ctx, 0x398/4, 0x1); + INSTANCE_WR(ctx, 0x39C/4, 0x7); + INSTANCE_WR(ctx, 0x3A0/4, 0x1); + INSTANCE_WR(ctx, 0x3A4/4, 0x1); + INSTANCE_WR(ctx, 0x3A8/4, 0x1); + INSTANCE_WR(ctx, 0x3BC/4, 0x1); + INSTANCE_WR(ctx, 0x3C0/4, 0x100); + INSTANCE_WR(ctx, 0x3C8/4, 0x1); + INSTANCE_WR(ctx, 0x3D4/4, 0x100); + INSTANCE_WR(ctx, 0x3D8/4, 0x1); + INSTANCE_WR(ctx, 0x3DC/4, 0x100); + INSTANCE_WR(ctx, 0x3E4/4, 0x1); + INSTANCE_WR(ctx, 0x3F0/4, 0x100); + INSTANCE_WR(ctx, 0x404/4, 0x4); + INSTANCE_WR(ctx, 0x408/4, 0x70); + INSTANCE_WR(ctx, 0x40C/4, 0x80); + INSTANCE_WR(ctx, 0x420/4, 0xC); + INSTANCE_WR(ctx, 0x428/4, 0x8); + INSTANCE_WR(ctx, 0x42C/4, 0x14); + INSTANCE_WR(ctx, 0x434/4, 0x29); + INSTANCE_WR(ctx, 0x438/4, 0x27); + INSTANCE_WR(ctx, 0x43C/4, 0x26); + INSTANCE_WR(ctx, 0x440/4, 0x8); + INSTANCE_WR(ctx, 0x444/4, 0x4); + INSTANCE_WR(ctx, 0x448/4, 0x27); + INSTANCE_WR(ctx, 0x454/4, 0x1); + INSTANCE_WR(ctx, 0x458/4, 0x2); + INSTANCE_WR(ctx, 0x45C/4, 0x3); + INSTANCE_WR(ctx, 0x460/4, 0x4); + INSTANCE_WR(ctx, 0x464/4, 0x5); + INSTANCE_WR(ctx, 0x468/4, 0x6); + INSTANCE_WR(ctx, 0x46C/4, 0x7); + INSTANCE_WR(ctx, 0x470/4, 0x1); + INSTANCE_WR(ctx, 0x4B4/4, 0xCF); + INSTANCE_WR(ctx, 0x4E4/4, 0x80); + INSTANCE_WR(ctx, 0x4E8/4, 0x4); + INSTANCE_WR(ctx, 0x4EC/4, 0x4); + INSTANCE_WR(ctx, 0x4F0/4, 0x3); + INSTANCE_WR(ctx, 0x4F4/4, 0x1); + INSTANCE_WR(ctx, 0x500/4, 0x12); + INSTANCE_WR(ctx, 0x504/4, 0x10); + INSTANCE_WR(ctx, 0x508/4, 0xC); + INSTANCE_WR(ctx, 0x50C/4, 0x1); + INSTANCE_WR(ctx, 0x51C/4, 0x4); + INSTANCE_WR(ctx, 0x520/4, 0x2); + INSTANCE_WR(ctx, 0x524/4, 0x4); + INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF); + INSTANCE_WR(ctx, 0x534/4, 0x1FFF); + INSTANCE_WR(ctx, 0x55C/4, 0x4); + INSTANCE_WR(ctx, 0x560/4, 0x14); + INSTANCE_WR(ctx, 0x564/4, 0x1); + 
INSTANCE_WR(ctx, 0x570/4, 0x2); + INSTANCE_WR(ctx, 0x57C/4, 0x1); + INSTANCE_WR(ctx, 0x584/4, 0x2); + INSTANCE_WR(ctx, 0x588/4, 0x1000); + INSTANCE_WR(ctx, 0x58C/4, 0xE00); + INSTANCE_WR(ctx, 0x590/4, 0x1000); + INSTANCE_WR(ctx, 0x594/4, 0x1E00); + INSTANCE_WR(ctx, 0x59C/4, 0x1); + INSTANCE_WR(ctx, 0x5A0/4, 0x1); + INSTANCE_WR(ctx, 0x5A4/4, 0x1); + INSTANCE_WR(ctx, 0x5A8/4, 0x1); + INSTANCE_WR(ctx, 0x5AC/4, 0x1); + INSTANCE_WR(ctx, 0x5BC/4, 0x200); + INSTANCE_WR(ctx, 0x5C4/4, 0x1); + INSTANCE_WR(ctx, 0x5C8/4, 0x70); + INSTANCE_WR(ctx, 0x5CC/4, 0x80); + INSTANCE_WR(ctx, 0x5D8/4, 0x1); + INSTANCE_WR(ctx, 0x5DC/4, 0x70); + INSTANCE_WR(ctx, 0x5E0/4, 0x80); + INSTANCE_WR(ctx, 0x5F0/4, 0x1); + INSTANCE_WR(ctx, 0x5F4/4, 0xCF); + INSTANCE_WR(ctx, 0x5FC/4, 0x1); + INSTANCE_WR(ctx, 0x60C/4, 0xCF); + INSTANCE_WR(ctx, 0x614/4, 0x2); + INSTANCE_WR(ctx, 0x61C/4, 0x1); + INSTANCE_WR(ctx, 0x624/4, 0x1); + INSTANCE_WR(ctx, 0x62C/4, 0xCF); + INSTANCE_WR(ctx, 0x630/4, 0xCF); + INSTANCE_WR(ctx, 0x634/4, 0x1); + INSTANCE_WR(ctx, 0x63C/4, 0x1F80); + INSTANCE_WR(ctx, 0x654/4, 0x3B74F821); + INSTANCE_WR(ctx, 0x658/4, 0x89058001); + INSTANCE_WR(ctx, 0x660/4, 0x1000); + INSTANCE_WR(ctx, 0x664/4, 0x1F); + INSTANCE_WR(ctx, 0x668/4, 0x27C10FA); + INSTANCE_WR(ctx, 0x66C/4, 0x400000C0); + INSTANCE_WR(ctx, 0x670/4, 0xB7892080); + INSTANCE_WR(ctx, 0x67C/4, 0x3B74F821); + INSTANCE_WR(ctx, 0x680/4, 0x89058001); + INSTANCE_WR(ctx, 0x688/4, 0x1000); + INSTANCE_WR(ctx, 0x68C/4, 0x1F); + INSTANCE_WR(ctx, 0x690/4, 0x27C10FA); + INSTANCE_WR(ctx, 0x694/4, 0x400000C0); + INSTANCE_WR(ctx, 0x698/4, 0xB7892080); + INSTANCE_WR(ctx, 0x6A4/4, 0x3B74F821); + INSTANCE_WR(ctx, 0x6A8/4, 0x89058001); + INSTANCE_WR(ctx, 0x6B0/4, 0x1000); + INSTANCE_WR(ctx, 0x6B4/4, 0x1F); + INSTANCE_WR(ctx, 0x6B8/4, 0x27C10FA); + INSTANCE_WR(ctx, 0x6BC/4, 0x400000C0); + INSTANCE_WR(ctx, 0x6C0/4, 0xB7892080); + INSTANCE_WR(ctx, 0x6CC/4, 0x3B74F821); + INSTANCE_WR(ctx, 0x6D0/4, 0x89058001); + INSTANCE_WR(ctx, 0x6D8/4, 0x1000); + 
INSTANCE_WR(ctx, 0x6DC/4, 0x1F); + INSTANCE_WR(ctx, 0x6E0/4, 0x27C10FA); + INSTANCE_WR(ctx, 0x6E4/4, 0x400000C0); + INSTANCE_WR(ctx, 0x6E8/4, 0xB7892080); + INSTANCE_WR(ctx, 0x6F4/4, 0x390040); + INSTANCE_WR(ctx, 0x6FC/4, 0x22); + INSTANCE_WR(ctx, 0x708/4, 0x390040); + INSTANCE_WR(ctx, 0x70C/4, 0x22); + INSTANCE_WR(ctx, 0x724/4, 0x1800000); + INSTANCE_WR(ctx, 0x728/4, 0x160000); + INSTANCE_WR(ctx, 0x72C/4, 0x1800000); + INSTANCE_WR(ctx, 0x73C/4, 0x3FFFF); + INSTANCE_WR(ctx, 0x740/4, 0x118C0000); + INSTANCE_WR(ctx, 0x764/4, 0x10401); + INSTANCE_WR(ctx, 0x76C/4, 0x78); + INSTANCE_WR(ctx, 0x774/4, 0xBF); + INSTANCE_WR(ctx, 0x77C/4, 0x1210); + INSTANCE_WR(ctx, 0x780/4, 0x8000080); + INSTANCE_WR(ctx, 0x7A4/4, 0x1800000); + INSTANCE_WR(ctx, 0x7A8/4, 0x160000); + INSTANCE_WR(ctx, 0x7AC/4, 0x1800000); + INSTANCE_WR(ctx, 0x7BC/4, 0x3FFFF); + INSTANCE_WR(ctx, 0x7C0/4, 0x118C0000); + INSTANCE_WR(ctx, 0x7E4/4, 0x10401); + INSTANCE_WR(ctx, 0x7EC/4, 0x78); + INSTANCE_WR(ctx, 0x7F4/4, 0xBF); + INSTANCE_WR(ctx, 0x7FC/4, 0x1210); + INSTANCE_WR(ctx, 0x800/4, 0x8000080); + INSTANCE_WR(ctx, 0x828/4, 0x27070); + INSTANCE_WR(ctx, 0x834/4, 0x3FFFFFF); + INSTANCE_WR(ctx, 0x84C/4, 0x120407); + INSTANCE_WR(ctx, 0x850/4, 0x5091507); + INSTANCE_WR(ctx, 0x854/4, 0x5010202); + INSTANCE_WR(ctx, 0x858/4, 0x30201); + INSTANCE_WR(ctx, 0x874/4, 0x40); + INSTANCE_WR(ctx, 0x878/4, 0xD0C0B0A); + INSTANCE_WR(ctx, 0x87C/4, 0x141210); + INSTANCE_WR(ctx, 0x880/4, 0x1F0); + INSTANCE_WR(ctx, 0x884/4, 0x1); + INSTANCE_WR(ctx, 0x888/4, 0x3); + INSTANCE_WR(ctx, 0x894/4, 0x39E00); + INSTANCE_WR(ctx, 0x898/4, 0x100); + INSTANCE_WR(ctx, 0x89C/4, 0x3800); + INSTANCE_WR(ctx, 0x8A0/4, 0x404040); + INSTANCE_WR(ctx, 0x8A4/4, 0xFF0A); + INSTANCE_WR(ctx, 0x8AC/4, 0x77F005); + INSTANCE_WR(ctx, 0x8B0/4, 0x3F7FFF); + INSTANCE_WR(ctx, 0x8C0/4, 0x1800000); + INSTANCE_WR(ctx, 0x8C4/4, 0x160000); + INSTANCE_WR(ctx, 0x8C8/4, 0x1800000); + INSTANCE_WR(ctx, 0x8D8/4, 0x3FFFF); + INSTANCE_WR(ctx, 0x8DC/4, 0x118C0000); + 
INSTANCE_WR(ctx, 0x900/4, 0x10401); + INSTANCE_WR(ctx, 0x908/4, 0x78); + INSTANCE_WR(ctx, 0x910/4, 0xBF); + INSTANCE_WR(ctx, 0x918/4, 0x1210); + INSTANCE_WR(ctx, 0x91C/4, 0x8000080); + INSTANCE_WR(ctx, 0x940/4, 0x1800000); + INSTANCE_WR(ctx, 0x944/4, 0x160000); + INSTANCE_WR(ctx, 0x948/4, 0x1800000); + INSTANCE_WR(ctx, 0x958/4, 0x3FFFF); + INSTANCE_WR(ctx, 0x95C/4, 0x118C0000); + INSTANCE_WR(ctx, 0x980/4, 0x10401); + INSTANCE_WR(ctx, 0x988/4, 0x78); + INSTANCE_WR(ctx, 0x990/4, 0xBF); + INSTANCE_WR(ctx, 0x998/4, 0x1210); + INSTANCE_WR(ctx, 0x99C/4, 0x8000080); + INSTANCE_WR(ctx, 0x9C4/4, 0x27070); + INSTANCE_WR(ctx, 0x9D0/4, 0x3FFFFFF); + INSTANCE_WR(ctx, 0x9E8/4, 0x120407); + INSTANCE_WR(ctx, 0x9EC/4, 0x5091507); + INSTANCE_WR(ctx, 0x9F0/4, 0x5010202); + INSTANCE_WR(ctx, 0x9F4/4, 0x30201); + INSTANCE_WR(ctx, 0xA10/4, 0x40); + INSTANCE_WR(ctx, 0xA14/4, 0xD0C0B0A); + INSTANCE_WR(ctx, 0xA18/4, 0x141210); + INSTANCE_WR(ctx, 0xA1C/4, 0x1F0); + INSTANCE_WR(ctx, 0xA20/4, 0x1); + INSTANCE_WR(ctx, 0xA24/4, 0x3); + INSTANCE_WR(ctx, 0xA30/4, 0x39E00); + INSTANCE_WR(ctx, 0xA34/4, 0x100); + INSTANCE_WR(ctx, 0xA38/4, 0x3800); + INSTANCE_WR(ctx, 0xA3C/4, 0x404040); + INSTANCE_WR(ctx, 0xA40/4, 0xFF0A); + INSTANCE_WR(ctx, 0xA48/4, 0x77F005); + INSTANCE_WR(ctx, 0xA4C/4, 0x3F7FFF); + INSTANCE_WR(ctx, 0xA5C/4, 0x1800000); + INSTANCE_WR(ctx, 0xA60/4, 0x160000); + INSTANCE_WR(ctx, 0xA64/4, 0x1800000); + INSTANCE_WR(ctx, 0xA74/4, 0x3FFFF); + INSTANCE_WR(ctx, 0xA78/4, 0x118C0000); + INSTANCE_WR(ctx, 0xA9C/4, 0x10401); + INSTANCE_WR(ctx, 0xAA4/4, 0x78); + INSTANCE_WR(ctx, 0xAAC/4, 0xBF); + INSTANCE_WR(ctx, 0xAB4/4, 0x1210); + INSTANCE_WR(ctx, 0xAB8/4, 0x8000080); + INSTANCE_WR(ctx, 0xADC/4, 0x1800000); + INSTANCE_WR(ctx, 0xAE0/4, 0x160000); + INSTANCE_WR(ctx, 0xAE4/4, 0x1800000); + INSTANCE_WR(ctx, 0xAF4/4, 0x3FFFF); + INSTANCE_WR(ctx, 0xAF8/4, 0x118C0000); + INSTANCE_WR(ctx, 0xB1C/4, 0x10401); + INSTANCE_WR(ctx, 0xB24/4, 0x78); + INSTANCE_WR(ctx, 0xB2C/4, 0xBF); + INSTANCE_WR(ctx, 
0xB34/4, 0x1210); + INSTANCE_WR(ctx, 0xB38/4, 0x8000080); + INSTANCE_WR(ctx, 0xB60/4, 0x27070); + INSTANCE_WR(ctx, 0xB6C/4, 0x3FFFFFF); + INSTANCE_WR(ctx, 0xB84/4, 0x120407); + INSTANCE_WR(ctx, 0xB88/4, 0x5091507); + INSTANCE_WR(ctx, 0xB8C/4, 0x5010202); + INSTANCE_WR(ctx, 0xB90/4, 0x30201); + INSTANCE_WR(ctx, 0xBAC/4, 0x40); + INSTANCE_WR(ctx, 0xBB0/4, 0xD0C0B0A); + INSTANCE_WR(ctx, 0xBB4/4, 0x141210); + INSTANCE_WR(ctx, 0xBB8/4, 0x1F0); + INSTANCE_WR(ctx, 0xBBC/4, 0x1); + INSTANCE_WR(ctx, 0xBC0/4, 0x3); + INSTANCE_WR(ctx, 0xBCC/4, 0x39E00); + INSTANCE_WR(ctx, 0xBD0/4, 0x100); + INSTANCE_WR(ctx, 0xBD4/4, 0x3800); + INSTANCE_WR(ctx, 0xBD8/4, 0x404040); + INSTANCE_WR(ctx, 0xBDC/4, 0xFF0A); + INSTANCE_WR(ctx, 0xBE4/4, 0x77F005); + INSTANCE_WR(ctx, 0xBE8/4, 0x3F7FFF); + INSTANCE_WR(ctx, 0xBF8/4, 0x1800000); + INSTANCE_WR(ctx, 0xBFC/4, 0x160000); + INSTANCE_WR(ctx, 0xC00/4, 0x1800000); + INSTANCE_WR(ctx, 0xC10/4, 0x3FFFF); + INSTANCE_WR(ctx, 0xC14/4, 0x118C0000); + INSTANCE_WR(ctx, 0xC38/4, 0x10401); + INSTANCE_WR(ctx, 0xC40/4, 0x78); + INSTANCE_WR(ctx, 0xC48/4, 0xBF); + INSTANCE_WR(ctx, 0xC50/4, 0x1210); + INSTANCE_WR(ctx, 0xC54/4, 0x8000080); + INSTANCE_WR(ctx, 0xC78/4, 0x1800000); + INSTANCE_WR(ctx, 0xC7C/4, 0x160000); + INSTANCE_WR(ctx, 0xC80/4, 0x1800000); + INSTANCE_WR(ctx, 0xC90/4, 0x3FFFF); + INSTANCE_WR(ctx, 0xC94/4, 0x118C0000); + INSTANCE_WR(ctx, 0xCB8/4, 0x10401); + INSTANCE_WR(ctx, 0xCC0/4, 0x78); + INSTANCE_WR(ctx, 0xCC8/4, 0xBF); + INSTANCE_WR(ctx, 0xCD0/4, 0x1210); + INSTANCE_WR(ctx, 0xCD4/4, 0x8000080); + INSTANCE_WR(ctx, 0xCFC/4, 0x27070); + INSTANCE_WR(ctx, 0xD08/4, 0x3FFFFFF); + INSTANCE_WR(ctx, 0xD20/4, 0x120407); + INSTANCE_WR(ctx, 0xD24/4, 0x5091507); + INSTANCE_WR(ctx, 0xD28/4, 0x5010202); + INSTANCE_WR(ctx, 0xD2C/4, 0x30201); + INSTANCE_WR(ctx, 0xD48/4, 0x40); + INSTANCE_WR(ctx, 0xD4C/4, 0xD0C0B0A); + INSTANCE_WR(ctx, 0xD50/4, 0x141210); + INSTANCE_WR(ctx, 0xD54/4, 0x1F0); + INSTANCE_WR(ctx, 0xD58/4, 0x1); + INSTANCE_WR(ctx, 0xD5C/4, 0x3); + 
INSTANCE_WR(ctx, 0xD68/4, 0x39E00); + INSTANCE_WR(ctx, 0xD6C/4, 0x100); + INSTANCE_WR(ctx, 0xD70/4, 0x3800); + INSTANCE_WR(ctx, 0xD74/4, 0x404040); + INSTANCE_WR(ctx, 0xD78/4, 0xFF0A); + INSTANCE_WR(ctx, 0xD80/4, 0x77F005); + INSTANCE_WR(ctx, 0xD84/4, 0x3F7FFF); + INSTANCE_WR(ctx, 0xD94/4, 0x1800000); + INSTANCE_WR(ctx, 0xD98/4, 0x160000); + INSTANCE_WR(ctx, 0xD9C/4, 0x1800000); + INSTANCE_WR(ctx, 0xDAC/4, 0x3FFFF); + INSTANCE_WR(ctx, 0xDB0/4, 0x118C0000); + INSTANCE_WR(ctx, 0xDD4/4, 0x10401); + INSTANCE_WR(ctx, 0xDDC/4, 0x78); + INSTANCE_WR(ctx, 0xDE4/4, 0xBF); + INSTANCE_WR(ctx, 0xDEC/4, 0x1210); + INSTANCE_WR(ctx, 0xDF0/4, 0x8000080); + INSTANCE_WR(ctx, 0xE14/4, 0x1800000); + INSTANCE_WR(ctx, 0xE18/4, 0x160000); + INSTANCE_WR(ctx, 0xE1C/4, 0x1800000); + INSTANCE_WR(ctx, 0xE2C/4, 0x3FFFF); + INSTANCE_WR(ctx, 0xE30/4, 0x118C0000); + INSTANCE_WR(ctx, 0xE54/4, 0x10401); + INSTANCE_WR(ctx, 0xE5C/4, 0x78); + INSTANCE_WR(ctx, 0xE64/4, 0xBF); + INSTANCE_WR(ctx, 0xE6C/4, 0x1210); + INSTANCE_WR(ctx, 0xE70/4, 0x8000080); + INSTANCE_WR(ctx, 0xE98/4, 0x27070); + INSTANCE_WR(ctx, 0xEA4/4, 0x3FFFFFF); + INSTANCE_WR(ctx, 0xEBC/4, 0x120407); + INSTANCE_WR(ctx, 0xEC0/4, 0x5091507); + INSTANCE_WR(ctx, 0xEC4/4, 0x5010202); + INSTANCE_WR(ctx, 0xEC8/4, 0x30201); + INSTANCE_WR(ctx, 0xEE4/4, 0x40); + INSTANCE_WR(ctx, 0xEE8/4, 0xD0C0B0A); + INSTANCE_WR(ctx, 0xEEC/4, 0x141210); + INSTANCE_WR(ctx, 0xEF0/4, 0x1F0); + INSTANCE_WR(ctx, 0xEF4/4, 0x1); + INSTANCE_WR(ctx, 0xEF8/4, 0x3); + INSTANCE_WR(ctx, 0xF04/4, 0x39E00); + INSTANCE_WR(ctx, 0xF08/4, 0x100); + INSTANCE_WR(ctx, 0xF0C/4, 0x3800); + INSTANCE_WR(ctx, 0xF10/4, 0x404040); + INSTANCE_WR(ctx, 0xF14/4, 0xFF0A); + INSTANCE_WR(ctx, 0xF1C/4, 0x77F005); + INSTANCE_WR(ctx, 0xF20/4, 0x3F7FFF); + INSTANCE_WR(ctx, 0xF30/4, 0x1800000); + INSTANCE_WR(ctx, 0xF34/4, 0x160000); + INSTANCE_WR(ctx, 0xF38/4, 0x1800000); + INSTANCE_WR(ctx, 0xF48/4, 0x3FFFF); + INSTANCE_WR(ctx, 0xF4C/4, 0x118C0000); + INSTANCE_WR(ctx, 0xF70/4, 0x10401); + 
INSTANCE_WR(ctx, 0xF78/4, 0x78); + INSTANCE_WR(ctx, 0xF80/4, 0xBF); + INSTANCE_WR(ctx, 0xF88/4, 0x1210); + INSTANCE_WR(ctx, 0xF8C/4, 0x8000080); + INSTANCE_WR(ctx, 0xFB0/4, 0x1800000); + INSTANCE_WR(ctx, 0xFB4/4, 0x160000); + INSTANCE_WR(ctx, 0xFB8/4, 0x1800000); + INSTANCE_WR(ctx, 0xFC8/4, 0x3FFFF); + INSTANCE_WR(ctx, 0xFCC/4, 0x118C0000); + INSTANCE_WR(ctx, 0xFF0/4, 0x10401); + INSTANCE_WR(ctx, 0xFF8/4, 0x78); + INSTANCE_WR(ctx, 0x1000/4, 0xBF); + INSTANCE_WR(ctx, 0x1008/4, 0x1210); + INSTANCE_WR(ctx, 0x100C/4, 0x8000080); + INSTANCE_WR(ctx, 0x1034/4, 0x27070); + INSTANCE_WR(ctx, 0x1040/4, 0x3FFFFFF); + INSTANCE_WR(ctx, 0x1058/4, 0x120407); + INSTANCE_WR(ctx, 0x105C/4, 0x5091507); + INSTANCE_WR(ctx, 0x1060/4, 0x5010202); + INSTANCE_WR(ctx, 0x1064/4, 0x30201); + INSTANCE_WR(ctx, 0x1080/4, 0x40); + INSTANCE_WR(ctx, 0x1084/4, 0xD0C0B0A); + INSTANCE_WR(ctx, 0x1088/4, 0x141210); + INSTANCE_WR(ctx, 0x108C/4, 0x1F0); + INSTANCE_WR(ctx, 0x1090/4, 0x1); + INSTANCE_WR(ctx, 0x1094/4, 0x3); + INSTANCE_WR(ctx, 0x10A0/4, 0x39E00); + INSTANCE_WR(ctx, 0x10A4/4, 0x100); + INSTANCE_WR(ctx, 0x10A8/4, 0x3800); + INSTANCE_WR(ctx, 0x10AC/4, 0x404040); + INSTANCE_WR(ctx, 0x10B0/4, 0xFF0A); + INSTANCE_WR(ctx, 0x10B8/4, 0x77F005); + INSTANCE_WR(ctx, 0x10BC/4, 0x3F7FFF); + INSTANCE_WR(ctx, 0x10CC/4, 0x1800000); + INSTANCE_WR(ctx, 0x10D0/4, 0x160000); + INSTANCE_WR(ctx, 0x10D4/4, 0x1800000); + INSTANCE_WR(ctx, 0x10E4/4, 0x3FFFF); + INSTANCE_WR(ctx, 0x10E8/4, 0x118C0000); + INSTANCE_WR(ctx, 0x110C/4, 0x10401); + INSTANCE_WR(ctx, 0x1114/4, 0x78); + INSTANCE_WR(ctx, 0x111C/4, 0xBF); + INSTANCE_WR(ctx, 0x1124/4, 0x1210); + INSTANCE_WR(ctx, 0x1128/4, 0x8000080); + INSTANCE_WR(ctx, 0x114C/4, 0x1800000); + INSTANCE_WR(ctx, 0x1150/4, 0x160000); + INSTANCE_WR(ctx, 0x1154/4, 0x1800000); + INSTANCE_WR(ctx, 0x1164/4, 0x3FFFF); + INSTANCE_WR(ctx, 0x1168/4, 0x118C0000); + INSTANCE_WR(ctx, 0x118C/4, 0x10401); + INSTANCE_WR(ctx, 0x1194/4, 0x78); + INSTANCE_WR(ctx, 0x119C/4, 0xBF); + INSTANCE_WR(ctx, 
0x11A4/4, 0x1210); + INSTANCE_WR(ctx, 0x11A8/4, 0x8000080); + INSTANCE_WR(ctx, 0x11D0/4, 0x27070); + INSTANCE_WR(ctx, 0x11DC/4, 0x3FFFFFF); + INSTANCE_WR(ctx, 0x11F4/4, 0x120407); + INSTANCE_WR(ctx, 0x11F8/4, 0x5091507); + INSTANCE_WR(ctx, 0x11FC/4, 0x5010202); + INSTANCE_WR(ctx, 0x1200/4, 0x30201); + INSTANCE_WR(ctx, 0x121C/4, 0x40); + INSTANCE_WR(ctx, 0x1220/4, 0xD0C0B0A); + INSTANCE_WR(ctx, 0x1224/4, 0x141210); + INSTANCE_WR(ctx, 0x1228/4, 0x1F0); + INSTANCE_WR(ctx, 0x122C/4, 0x1); + INSTANCE_WR(ctx, 0x1230/4, 0x3); + INSTANCE_WR(ctx, 0x123C/4, 0x39E00); + INSTANCE_WR(ctx, 0x1240/4, 0x100); + INSTANCE_WR(ctx, 0x1244/4, 0x3800); + INSTANCE_WR(ctx, 0x1248/4, 0x404040); + INSTANCE_WR(ctx, 0x124C/4, 0xFF0A); + INSTANCE_WR(ctx, 0x1254/4, 0x77F005); + INSTANCE_WR(ctx, 0x1258/4, 0x3F7FFF); + INSTANCE_WR(ctx, 0x1268/4, 0x1800000); + INSTANCE_WR(ctx, 0x126C/4, 0x160000); + INSTANCE_WR(ctx, 0x1270/4, 0x1800000); + INSTANCE_WR(ctx, 0x1280/4, 0x3FFFF); + INSTANCE_WR(ctx, 0x1284/4, 0x118C0000); + INSTANCE_WR(ctx, 0x12A8/4, 0x10401); + INSTANCE_WR(ctx, 0x12B0/4, 0x78); + INSTANCE_WR(ctx, 0x12B8/4, 0xBF); + INSTANCE_WR(ctx, 0x12C0/4, 0x1210); + INSTANCE_WR(ctx, 0x12C4/4, 0x8000080); + INSTANCE_WR(ctx, 0x12E8/4, 0x1800000); + INSTANCE_WR(ctx, 0x12EC/4, 0x160000); + INSTANCE_WR(ctx, 0x12F0/4, 0x1800000); + INSTANCE_WR(ctx, 0x1300/4, 0x3FFFF); + INSTANCE_WR(ctx, 0x1304/4, 0x118C0000); + INSTANCE_WR(ctx, 0x1328/4, 0x10401); + INSTANCE_WR(ctx, 0x1330/4, 0x78); + INSTANCE_WR(ctx, 0x1338/4, 0xBF); + INSTANCE_WR(ctx, 0x1340/4, 0x1210); + INSTANCE_WR(ctx, 0x1344/4, 0x8000080); + INSTANCE_WR(ctx, 0x136C/4, 0x27070); + INSTANCE_WR(ctx, 0x1378/4, 0x3FFFFFF); + INSTANCE_WR(ctx, 0x1390/4, 0x120407); + INSTANCE_WR(ctx, 0x1394/4, 0x5091507); + INSTANCE_WR(ctx, 0x1398/4, 0x5010202); + INSTANCE_WR(ctx, 0x139C/4, 0x30201); + INSTANCE_WR(ctx, 0x13B8/4, 0x40); + INSTANCE_WR(ctx, 0x13BC/4, 0xD0C0B0A); + INSTANCE_WR(ctx, 0x13C0/4, 0x141210); + INSTANCE_WR(ctx, 0x13C4/4, 0x1F0); + INSTANCE_WR(ctx, 
0x13C8/4, 0x1); + INSTANCE_WR(ctx, 0x13CC/4, 0x3); + INSTANCE_WR(ctx, 0x13D8/4, 0x39E00); + INSTANCE_WR(ctx, 0x13DC/4, 0x100); + INSTANCE_WR(ctx, 0x13E0/4, 0x3800); + INSTANCE_WR(ctx, 0x13E4/4, 0x404040); + INSTANCE_WR(ctx, 0x13E8/4, 0xFF0A); + INSTANCE_WR(ctx, 0x13F0/4, 0x77F005); + INSTANCE_WR(ctx, 0x13F4/4, 0x3F7FFF); + INSTANCE_WR(ctx, 0x8620/4, 0x21); + INSTANCE_WR(ctx, 0x8640/4, 0x1); + INSTANCE_WR(ctx, 0x8660/4, 0x2); + INSTANCE_WR(ctx, 0x8680/4, 0x100); + INSTANCE_WR(ctx, 0x86A0/4, 0x100); + INSTANCE_WR(ctx, 0x86C0/4, 0x1); + INSTANCE_WR(ctx, 0x8720/4, 0x1); + INSTANCE_WR(ctx, 0x8740/4, 0x2); + INSTANCE_WR(ctx, 0x8760/4, 0x100); + INSTANCE_WR(ctx, 0x8780/4, 0x100); + INSTANCE_WR(ctx, 0x87A0/4, 0x1); + INSTANCE_WR(ctx, 0x1B8C0/4, 0x4); + INSTANCE_WR(ctx, 0x1B8E0/4, 0x4); + INSTANCE_WR(ctx, 0x54260/4, 0x4); + INSTANCE_WR(ctx, 0x54280/4, 0x4); + INSTANCE_WR(ctx, 0x542A0/4, 0x8100C12); + INSTANCE_WR(ctx, 0x542C0/4, 0x3); + INSTANCE_WR(ctx, 0x54300/4, 0x8100C12); + INSTANCE_WR(ctx, 0x54340/4, 0x80C14); + INSTANCE_WR(ctx, 0x54360/4, 0x1); + INSTANCE_WR(ctx, 0x54380/4, 0x80C14); + INSTANCE_WR(ctx, 0x543E0/4, 0x8100C12); + INSTANCE_WR(ctx, 0x54400/4, 0x27); + INSTANCE_WR(ctx, 0x54460/4, 0x1); + INSTANCE_WR(ctx, 0x5BCA0/4, 0x1); + INSTANCE_WR(ctx, 0x5BF80/4, 0x8100C12); + INSTANCE_WR(ctx, 0x5C120/4, 0x4000000); + INSTANCE_WR(ctx, 0x5C140/4, 0x4000000); + INSTANCE_WR(ctx, 0x5C180/4, 0x80); + INSTANCE_WR(ctx, 0x5C200/4, 0x80); + INSTANCE_WR(ctx, 0x5C240/4, 0x3F); + INSTANCE_WR(ctx, 0x5C3A0/4, 0x2); + INSTANCE_WR(ctx, 0x5C3C0/4, 0x4000000); + INSTANCE_WR(ctx, 0x5C3E0/4, 0x4000000); + INSTANCE_WR(ctx, 0x5C500/4, 0x4); + INSTANCE_WR(ctx, 0x5C580/4, 0x4); + INSTANCE_WR(ctx, 0x5C7C0/4, 0x1); + INSTANCE_WR(ctx, 0x5C7E0/4, 0x1001); + INSTANCE_WR(ctx, 0x5C800/4, 0xFFFF); + INSTANCE_WR(ctx, 0x5C820/4, 0xFFFF); + INSTANCE_WR(ctx, 0x5C840/4, 0xFFFF); + INSTANCE_WR(ctx, 0x5C860/4, 0xFFFF); + INSTANCE_WR(ctx, 0x5CC80/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CCA0/4, 0x3F800000); + 
INSTANCE_WR(ctx, 0x5CCC0/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CCE0/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CD00/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CD20/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CD40/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CD60/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CD80/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CDA0/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CDC0/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CDE0/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CE00/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CE20/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CE40/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CE60/4, 0x3F800000); + INSTANCE_WR(ctx, 0x5CE80/4, 0x10); + INSTANCE_WR(ctx, 0x5CEE0/4, 0x3); + INSTANCE_WR(ctx, 0x1584/4, 0xF); + INSTANCE_WR(ctx, 0x1624/4, 0x20); + INSTANCE_WR(ctx, 0x1804/4, 0x1A); + INSTANCE_WR(ctx, 0x19C4/4, 0x4); + INSTANCE_WR(ctx, 0x19E4/4, 0x4); + INSTANCE_WR(ctx, 0x1A24/4, 0x4); + INSTANCE_WR(ctx, 0x1A44/4, 0x8); + INSTANCE_WR(ctx, 0x1A84/4, 0x7FF); + INSTANCE_WR(ctx, 0x1C24/4, 0xF); + INSTANCE_WR(ctx, 0x4104/4, 0xF); + INSTANCE_WR(ctx, 0x4144/4, 0x1); + INSTANCE_WR(ctx, 0x4CA4/4, 0xF); + INSTANCE_WR(ctx, 0x15344/4, 0xF); + INSTANCE_WR(ctx, 0x155E4/4, 0x1); + INSTANCE_WR(ctx, 0x15604/4, 0x100); + INSTANCE_WR(ctx, 0x15624/4, 0x100); + INSTANCE_WR(ctx, 0x15644/4, 0x11); + INSTANCE_WR(ctx, 0x15684/4, 0x8); + INSTANCE_WR(ctx, 0x15744/4, 0x1); + INSTANCE_WR(ctx, 0x15784/4, 0x1); + INSTANCE_WR(ctx, 0x157A4/4, 0x1); + INSTANCE_WR(ctx, 0x157C4/4, 0x1); + INSTANCE_WR(ctx, 0x157E4/4, 0xCF); + INSTANCE_WR(ctx, 0x15804/4, 0x2); + INSTANCE_WR(ctx, 0x158E4/4, 0x1); + INSTANCE_WR(ctx, 0x15924/4, 0x1); + INSTANCE_WR(ctx, 0x15944/4, 0x1); + INSTANCE_WR(ctx, 0x15964/4, 0x1); + INSTANCE_WR(ctx, 0x15A04/4, 0x4); + INSTANCE_WR(ctx, 0x15A44/4, 0x1); + INSTANCE_WR(ctx, 0x15A64/4, 0x15); + INSTANCE_WR(ctx, 0x15AE4/4, 0x4444480); + INSTANCE_WR(ctx, 0x16264/4, 0x8100C12); + INSTANCE_WR(ctx, 0x16304/4, 0x100); + INSTANCE_WR(ctx, 0x16364/4, 0x10001); + INSTANCE_WR(ctx, 0x163A4/4, 0x10001); + 
INSTANCE_WR(ctx, 0x163C4/4, 0x1); + INSTANCE_WR(ctx, 0x163E4/4, 0x10001); + INSTANCE_WR(ctx, 0x16404/4, 0x1); + INSTANCE_WR(ctx, 0x16424/4, 0x4); + INSTANCE_WR(ctx, 0x16444/4, 0x2); + INSTANCE_WR(ctx, 0x183C4/4, 0x4E3BFDF); + INSTANCE_WR(ctx, 0x183E4/4, 0x4E3BFDF); + INSTANCE_WR(ctx, 0x18484/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x18604/4, 0x4E3BFDF); + INSTANCE_WR(ctx, 0x18624/4, 0x4E3BFDF); + INSTANCE_WR(ctx, 0x16508/4, 0x3FFFFF); + INSTANCE_WR(ctx, 0x16568/4, 0x1FFF); + INSTANCE_WR(ctx, 0x16748/4, 0x3F800000); + INSTANCE_WR(ctx, 0x16828/4, 0x4); + INSTANCE_WR(ctx, 0x16848/4, 0x1A); + INSTANCE_WR(ctx, 0x168A8/4, 0x1); + INSTANCE_WR(ctx, 0x16B08/4, 0xFFFF00); + INSTANCE_WR(ctx, 0x16BE8/4, 0xF); + INSTANCE_WR(ctx, 0x16CE8/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x16D08/4, 0x11); + INSTANCE_WR(ctx, 0x16F08/4, 0x4); + INSTANCE_WR(ctx, 0x16FA8/4, 0x2); + INSTANCE_WR(ctx, 0x16FC8/4, 0x4000000); + INSTANCE_WR(ctx, 0x16FE8/4, 0x4000000); + INSTANCE_WR(ctx, 0x17068/4, 0x5); + INSTANCE_WR(ctx, 0x17088/4, 0x52); + INSTANCE_WR(ctx, 0x17128/4, 0x1); + INSTANCE_WR(ctx, 0x17348/4, 0x3F800000); + INSTANCE_WR(ctx, 0x17368/4, 0x3F800000); + INSTANCE_WR(ctx, 0x17388/4, 0x3F800000); + INSTANCE_WR(ctx, 0x173A8/4, 0x3F800000); + INSTANCE_WR(ctx, 0x173C8/4, 0x3F800000); + INSTANCE_WR(ctx, 0x173E8/4, 0x3F800000); + INSTANCE_WR(ctx, 0x17408/4, 0x3F800000); + INSTANCE_WR(ctx, 0x17428/4, 0x3F800000); + INSTANCE_WR(ctx, 0x17448/4, 0x3F800000); + INSTANCE_WR(ctx, 0x17468/4, 0x3F800000); + INSTANCE_WR(ctx, 0x17488/4, 0x3F800000); + INSTANCE_WR(ctx, 0x174A8/4, 0x3F800000); + INSTANCE_WR(ctx, 0x174C8/4, 0x3F800000); + INSTANCE_WR(ctx, 0x174E8/4, 0x3F800000); + INSTANCE_WR(ctx, 0x17508/4, 0x3F800000); + INSTANCE_WR(ctx, 0x17528/4, 0x3F800000); + INSTANCE_WR(ctx, 0x17548/4, 0x10); + INSTANCE_WR(ctx, 0x17A28/4, 0x8100C12); + INSTANCE_WR(ctx, 0x17A48/4, 0x5); + INSTANCE_WR(ctx, 0x17AA8/4, 0x1); + INSTANCE_WR(ctx, 0x17AE8/4, 0xFFFF); + INSTANCE_WR(ctx, 0x17B08/4, 0xFFFF); + INSTANCE_WR(ctx, 0x17B28/4, 
0xFFFF); + INSTANCE_WR(ctx, 0x17B48/4, 0xFFFF); + INSTANCE_WR(ctx, 0x17B68/4, 0x3); + INSTANCE_WR(ctx, 0x17F68/4, 0xFFFF00); + INSTANCE_WR(ctx, 0x17F88/4, 0x1A); + INSTANCE_WR(ctx, 0x17FC8/4, 0x3); + INSTANCE_WR(ctx, 0x184A8/4, 0x102); + INSTANCE_WR(ctx, 0x184E8/4, 0x4); + INSTANCE_WR(ctx, 0x18508/4, 0x4); + INSTANCE_WR(ctx, 0x18528/4, 0x4); + INSTANCE_WR(ctx, 0x18548/4, 0x4); + INSTANCE_WR(ctx, 0x18568/4, 0x4); + INSTANCE_WR(ctx, 0x18588/4, 0x4); + INSTANCE_WR(ctx, 0x185C8/4, 0x7FF); + INSTANCE_WR(ctx, 0x18608/4, 0x102); + INSTANCE_WR(ctx, 0x18748/4, 0x4); + INSTANCE_WR(ctx, 0x18768/4, 0x4); + INSTANCE_WR(ctx, 0x18788/4, 0x4); + INSTANCE_WR(ctx, 0x187A8/4, 0x4); + INSTANCE_WR(ctx, 0x18DE8/4, 0x80C14); + INSTANCE_WR(ctx, 0x18E48/4, 0x804); + INSTANCE_WR(ctx, 0x18E88/4, 0x4); + INSTANCE_WR(ctx, 0x18EA8/4, 0x4); + INSTANCE_WR(ctx, 0x18EC8/4, 0x8100C12); + INSTANCE_WR(ctx, 0x18F08/4, 0x4); + INSTANCE_WR(ctx, 0x18F28/4, 0x4); + INSTANCE_WR(ctx, 0x18F68/4, 0x10); + INSTANCE_WR(ctx, 0x19008/4, 0x804); + INSTANCE_WR(ctx, 0x19028/4, 0x1); + INSTANCE_WR(ctx, 0x19048/4, 0x1A); + INSTANCE_WR(ctx, 0x19068/4, 0x7F); + INSTANCE_WR(ctx, 0x190A8/4, 0x1); + INSTANCE_WR(ctx, 0x190C8/4, 0x80C14); + INSTANCE_WR(ctx, 0x19108/4, 0x8100C12); + INSTANCE_WR(ctx, 0x19128/4, 0x4); + INSTANCE_WR(ctx, 0x19148/4, 0x4); + INSTANCE_WR(ctx, 0x19188/4, 0x10); + INSTANCE_WR(ctx, 0x19208/4, 0x1); + INSTANCE_WR(ctx, 0x19228/4, 0x8100C12); + INSTANCE_WR(ctx, 0x19308/4, 0x7FF); + INSTANCE_WR(ctx, 0x19328/4, 0x80C14); + INSTANCE_WR(ctx, 0x19A48/4, 0x1); + INSTANCE_WR(ctx, 0x19AA8/4, 0x10); + INSTANCE_WR(ctx, 0x1A1C8/4, 0x88); + INSTANCE_WR(ctx, 0x1A1E8/4, 0x88); + INSTANCE_WR(ctx, 0x1A248/4, 0x4); + INSTANCE_WR(ctx, 0x1A528/4, 0x26); + INSTANCE_WR(ctx, 0x1A588/4, 0x3F800000); + INSTANCE_WR(ctx, 0x1A608/4, 0x1A); + INSTANCE_WR(ctx, 0x1A628/4, 0x10); + INSTANCE_WR(ctx, 0x1AB48/4, 0x52); + INSTANCE_WR(ctx, 0x1AB88/4, 0x26); + INSTANCE_WR(ctx, 0x1ABC8/4, 0x4); + INSTANCE_WR(ctx, 0x1ABE8/4, 0x4); + 
INSTANCE_WR(ctx, 0x1AC28/4, 0x1A); + INSTANCE_WR(ctx, 0x1AC88/4, 0xFFFF00); + INSTANCE_WR(ctx, 0x1ACC8/4, 0x4); + INSTANCE_WR(ctx, 0x1ACE8/4, 0x4); + INSTANCE_WR(ctx, 0x1AD28/4, 0x80); + INSTANCE_WR(ctx, 0x1AD48/4, 0x4); + INSTANCE_WR(ctx, 0x1AD68/4, 0x80C14); + INSTANCE_WR(ctx, 0x1ADA8/4, 0x7FF); + INSTANCE_WR(ctx, 0x2D608/4, 0x4); + INSTANCE_WR(ctx, 0x2D628/4, 0x4); + INSTANCE_WR(ctx, 0x2D668/4, 0x80); + INSTANCE_WR(ctx, 0x2D688/4, 0x4); + INSTANCE_WR(ctx, 0x2D6A8/4, 0x1); + INSTANCE_WR(ctx, 0x2D6E8/4, 0x27); + INSTANCE_WR(ctx, 0x2D728/4, 0x26); + INSTANCE_WR(ctx, 0x2D7A8/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D7C8/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D7E8/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D808/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D828/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D848/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D868/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D888/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D8A8/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D8C8/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D8E8/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D908/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D928/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D948/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D968/4, 0x4000000); + INSTANCE_WR(ctx, 0x2D988/4, 0x4000000); + INSTANCE_WR(ctx, 0x2DE28/4, 0x4E3BFDF); + INSTANCE_WR(ctx, 0x2DE48/4, 0x4E3BFDF); + INSTANCE_WR(ctx, 0x2DEA8/4, 0x1FE21); + INSTANCE_WR(ctx, 0x160C/4, 0x2); + INSTANCE_WR(ctx, 0x164C/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x17EC/4, 0x1); + INSTANCE_WR(ctx, 0x180C/4, 0x10); + INSTANCE_WR(ctx, 0x186C/4, 0x1); + INSTANCE_WR(ctx, 0x190C/4, 0x4); + INSTANCE_WR(ctx, 0x192C/4, 0x400); + INSTANCE_WR(ctx, 0x194C/4, 0x300); + INSTANCE_WR(ctx, 0x196C/4, 0x1001); + INSTANCE_WR(ctx, 0x198C/4, 0x15); + INSTANCE_WR(ctx, 0x1A4C/4, 0x2); + INSTANCE_WR(ctx, 0x1B6C/4, 0x1); + INSTANCE_WR(ctx, 0x1B8C/4, 0x10); + INSTANCE_WR(ctx, 0x1BCC/4, 0x1); + INSTANCE_WR(ctx, 0x1E4C/4, 0x10); + INSTANCE_WR(ctx, 0x206C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x208C/4, 0x3F800000); + INSTANCE_WR(ctx, 
0x20AC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x20CC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x20EC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x210C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x212C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x214C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x216C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x218C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x21AC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x21CC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x21EC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x220C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x222C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x224C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x258C/4, 0x10); + INSTANCE_WR(ctx, 0x25CC/4, 0x3F); + INSTANCE_WR(ctx, 0x26AC/4, 0x1); + INSTANCE_WR(ctx, 0x26EC/4, 0x1); + INSTANCE_WR(ctx, 0x272C/4, 0x1); + INSTANCE_WR(ctx, 0x28CC/4, 0x11); + INSTANCE_WR(ctx, 0x29CC/4, 0xF); + INSTANCE_WR(ctx, 0x2ACC/4, 0x11); + INSTANCE_WR(ctx, 0x2BAC/4, 0x1); + INSTANCE_WR(ctx, 0x2BCC/4, 0x1); + INSTANCE_WR(ctx, 0x2BEC/4, 0x1); + INSTANCE_WR(ctx, 0x2C0C/4, 0x2); + INSTANCE_WR(ctx, 0x2C2C/4, 0x1); + INSTANCE_WR(ctx, 0x2C4C/4, 0x2); + INSTANCE_WR(ctx, 0x2C6C/4, 0x1); + INSTANCE_WR(ctx, 0x2CAC/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x2CEC/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x2FAC/4, 0x1); + INSTANCE_WR(ctx, 0x2FCC/4, 0x2); + INSTANCE_WR(ctx, 0x2FEC/4, 0x1); + INSTANCE_WR(ctx, 0x300C/4, 0x1); + INSTANCE_WR(ctx, 0x302C/4, 0x2); + INSTANCE_WR(ctx, 0x304C/4, 0x1); + INSTANCE_WR(ctx, 0x306C/4, 0x1); + INSTANCE_WR(ctx, 0x30EC/4, 0x11); + INSTANCE_WR(ctx, 0x310C/4, 0x1); + INSTANCE_WR(ctx, 0x3D8C/4, 0x2); + INSTANCE_WR(ctx, 0x3DCC/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x3F6C/4, 0x1); + INSTANCE_WR(ctx, 0x3F8C/4, 0x10); + INSTANCE_WR(ctx, 0x3FEC/4, 0x1); + INSTANCE_WR(ctx, 0x408C/4, 0x4); + INSTANCE_WR(ctx, 0x40AC/4, 0x400); + INSTANCE_WR(ctx, 0x40CC/4, 0x300); + INSTANCE_WR(ctx, 0x40EC/4, 0x1001); + INSTANCE_WR(ctx, 0x410C/4, 0x15); + INSTANCE_WR(ctx, 0x41CC/4, 0x2); + INSTANCE_WR(ctx, 0x42EC/4, 0x1); + INSTANCE_WR(ctx, 0x430C/4, 0x10); + INSTANCE_WR(ctx, 0x434C/4, 0x1); 
+ INSTANCE_WR(ctx, 0x45CC/4, 0x10); + INSTANCE_WR(ctx, 0x47EC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x480C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x482C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x484C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x486C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x488C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x48AC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x48CC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x48EC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x490C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x492C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x494C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x496C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x498C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x49AC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x49CC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x4D0C/4, 0x10); + INSTANCE_WR(ctx, 0x4D4C/4, 0x3F); + INSTANCE_WR(ctx, 0x4E2C/4, 0x1); + INSTANCE_WR(ctx, 0x4E6C/4, 0x1); + INSTANCE_WR(ctx, 0x4EAC/4, 0x1); + INSTANCE_WR(ctx, 0x504C/4, 0x11); + INSTANCE_WR(ctx, 0x514C/4, 0xF); + INSTANCE_WR(ctx, 0x524C/4, 0x11); + INSTANCE_WR(ctx, 0x532C/4, 0x1); + INSTANCE_WR(ctx, 0x534C/4, 0x1); + INSTANCE_WR(ctx, 0x536C/4, 0x1); + INSTANCE_WR(ctx, 0x538C/4, 0x2); + INSTANCE_WR(ctx, 0x53AC/4, 0x1); + INSTANCE_WR(ctx, 0x53CC/4, 0x2); + INSTANCE_WR(ctx, 0x53EC/4, 0x1); + INSTANCE_WR(ctx, 0x542C/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x546C/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x572C/4, 0x1); + INSTANCE_WR(ctx, 0x574C/4, 0x2); + INSTANCE_WR(ctx, 0x576C/4, 0x1); + INSTANCE_WR(ctx, 0x578C/4, 0x1); + INSTANCE_WR(ctx, 0x57AC/4, 0x2); + INSTANCE_WR(ctx, 0x57CC/4, 0x1); + INSTANCE_WR(ctx, 0x57EC/4, 0x1); + INSTANCE_WR(ctx, 0x586C/4, 0x11); + INSTANCE_WR(ctx, 0x588C/4, 0x1); + INSTANCE_WR(ctx, 0x650C/4, 0x2); + INSTANCE_WR(ctx, 0x654C/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x66EC/4, 0x1); + INSTANCE_WR(ctx, 0x670C/4, 0x10); + INSTANCE_WR(ctx, 0x676C/4, 0x1); + INSTANCE_WR(ctx, 0x680C/4, 0x4); + INSTANCE_WR(ctx, 0x682C/4, 0x400); + INSTANCE_WR(ctx, 0x684C/4, 0x300); + INSTANCE_WR(ctx, 0x686C/4, 0x1001); + INSTANCE_WR(ctx, 0x688C/4, 0x15); + 
INSTANCE_WR(ctx, 0x694C/4, 0x2); + INSTANCE_WR(ctx, 0x6A6C/4, 0x1); + INSTANCE_WR(ctx, 0x6A8C/4, 0x10); + INSTANCE_WR(ctx, 0x6ACC/4, 0x1); + INSTANCE_WR(ctx, 0x6D4C/4, 0x10); + INSTANCE_WR(ctx, 0x6F6C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x6F8C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x6FAC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x6FCC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x6FEC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x700C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x702C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x704C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x706C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x708C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x70AC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x70CC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x70EC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x710C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x712C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x714C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x748C/4, 0x10); + INSTANCE_WR(ctx, 0x74CC/4, 0x3F); + INSTANCE_WR(ctx, 0x75AC/4, 0x1); + INSTANCE_WR(ctx, 0x75EC/4, 0x1); + INSTANCE_WR(ctx, 0x762C/4, 0x1); + INSTANCE_WR(ctx, 0x77CC/4, 0x11); + INSTANCE_WR(ctx, 0x78CC/4, 0xF); + INSTANCE_WR(ctx, 0x79CC/4, 0x11); + INSTANCE_WR(ctx, 0x7AAC/4, 0x1); + INSTANCE_WR(ctx, 0x7ACC/4, 0x1); + INSTANCE_WR(ctx, 0x7AEC/4, 0x1); + INSTANCE_WR(ctx, 0x7B0C/4, 0x2); + INSTANCE_WR(ctx, 0x7B2C/4, 0x1); + INSTANCE_WR(ctx, 0x7B4C/4, 0x2); + INSTANCE_WR(ctx, 0x7B6C/4, 0x1); + INSTANCE_WR(ctx, 0x7BAC/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x7BEC/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x7EAC/4, 0x1); + INSTANCE_WR(ctx, 0x7ECC/4, 0x2); + INSTANCE_WR(ctx, 0x7EEC/4, 0x1); + INSTANCE_WR(ctx, 0x7F0C/4, 0x1); + INSTANCE_WR(ctx, 0x7F2C/4, 0x2); + INSTANCE_WR(ctx, 0x7F4C/4, 0x1); + INSTANCE_WR(ctx, 0x7F6C/4, 0x1); + INSTANCE_WR(ctx, 0x7FEC/4, 0x11); + INSTANCE_WR(ctx, 0x800C/4, 0x1); + INSTANCE_WR(ctx, 0x8C8C/4, 0x2); + INSTANCE_WR(ctx, 0x8CCC/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x8E6C/4, 0x1); + INSTANCE_WR(ctx, 0x8E8C/4, 0x10); + INSTANCE_WR(ctx, 0x8EEC/4, 0x1); + INSTANCE_WR(ctx, 0x8F8C/4, 0x4); + 
INSTANCE_WR(ctx, 0x8FAC/4, 0x400); + INSTANCE_WR(ctx, 0x8FCC/4, 0x300); + INSTANCE_WR(ctx, 0x8FEC/4, 0x1001); + INSTANCE_WR(ctx, 0x900C/4, 0x15); + INSTANCE_WR(ctx, 0x90CC/4, 0x2); + INSTANCE_WR(ctx, 0x91EC/4, 0x1); + INSTANCE_WR(ctx, 0x920C/4, 0x10); + INSTANCE_WR(ctx, 0x924C/4, 0x1); + INSTANCE_WR(ctx, 0x94CC/4, 0x10); + INSTANCE_WR(ctx, 0x96EC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x970C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x972C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x974C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x976C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x978C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x97AC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x97CC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x97EC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x980C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x982C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x984C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x986C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x988C/4, 0x3F800000); + INSTANCE_WR(ctx, 0x98AC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x98CC/4, 0x3F800000); + INSTANCE_WR(ctx, 0x9C0C/4, 0x10); + INSTANCE_WR(ctx, 0x9C4C/4, 0x3F); + INSTANCE_WR(ctx, 0x9D2C/4, 0x1); + INSTANCE_WR(ctx, 0x9D6C/4, 0x1); + INSTANCE_WR(ctx, 0x9DAC/4, 0x1); + INSTANCE_WR(ctx, 0x9F4C/4, 0x11); + INSTANCE_WR(ctx, 0xA04C/4, 0xF); + INSTANCE_WR(ctx, 0xA14C/4, 0x11); + INSTANCE_WR(ctx, 0xA22C/4, 0x1); + INSTANCE_WR(ctx, 0xA24C/4, 0x1); + INSTANCE_WR(ctx, 0xA26C/4, 0x1); + INSTANCE_WR(ctx, 0xA28C/4, 0x2); + INSTANCE_WR(ctx, 0xA2AC/4, 0x1); + INSTANCE_WR(ctx, 0xA2CC/4, 0x2); + INSTANCE_WR(ctx, 0xA2EC/4, 0x1); + INSTANCE_WR(ctx, 0xA32C/4, 0x1FFE67); + INSTANCE_WR(ctx, 0xA36C/4, 0xFAC6881); + INSTANCE_WR(ctx, 0xA62C/4, 0x1); + INSTANCE_WR(ctx, 0xA64C/4, 0x2); + INSTANCE_WR(ctx, 0xA66C/4, 0x1); + INSTANCE_WR(ctx, 0xA68C/4, 0x1); + INSTANCE_WR(ctx, 0xA6AC/4, 0x2); + INSTANCE_WR(ctx, 0xA6CC/4, 0x1); + INSTANCE_WR(ctx, 0xA6EC/4, 0x1); + INSTANCE_WR(ctx, 0xA76C/4, 0x11); + INSTANCE_WR(ctx, 0xA78C/4, 0x1); + INSTANCE_WR(ctx, 0x1530/4, 0x4); + INSTANCE_WR(ctx, 0x17F0/4, 0x4); + 
INSTANCE_WR(ctx, 0x1810/4, 0x4); + INSTANCE_WR(ctx, 0x1830/4, 0x608080); + INSTANCE_WR(ctx, 0x18D0/4, 0x4); + INSTANCE_WR(ctx, 0x1930/4, 0x4); + INSTANCE_WR(ctx, 0x1950/4, 0x4); + INSTANCE_WR(ctx, 0x1970/4, 0x80); + INSTANCE_WR(ctx, 0x1990/4, 0x4); + INSTANCE_WR(ctx, 0x1E30/4, 0x4); + INSTANCE_WR(ctx, 0x1E50/4, 0x80); + INSTANCE_WR(ctx, 0x1E70/4, 0x4); + INSTANCE_WR(ctx, 0x1E90/4, 0x3020100); + INSTANCE_WR(ctx, 0x1EB0/4, 0x3); + INSTANCE_WR(ctx, 0x1ED0/4, 0x4); + INSTANCE_WR(ctx, 0x1F70/4, 0x4); + INSTANCE_WR(ctx, 0x1F90/4, 0x3); + INSTANCE_WR(ctx, 0x2010/4, 0x4); + INSTANCE_WR(ctx, 0x164B0/4, 0x4); + INSTANCE_WR(ctx, 0x164D0/4, 0x3); + INSTANCE_WR(ctx, 0x16710/4, 0xF); + INSTANCE_WR(ctx, 0x16890/4, 0x4); + INSTANCE_WR(ctx, 0x168B0/4, 0xFFFF); + INSTANCE_WR(ctx, 0x168D0/4, 0xFFFF); + INSTANCE_WR(ctx, 0x168F0/4, 0xFFFF); + INSTANCE_WR(ctx, 0x16910/4, 0xFFFF); + INSTANCE_WR(ctx, 0x16A30/4, 0x1); + INSTANCE_WR(ctx, 0x16AB0/4, 0x1); + INSTANCE_WR(ctx, 0x16B70/4, 0x1); + INSTANCE_WR(ctx, 0x16D10/4, 0x1); + INSTANCE_WR(ctx, 0x16D30/4, 0x1); + INSTANCE_WR(ctx, 0x16D50/4, 0x2); + INSTANCE_WR(ctx, 0x16D70/4, 0x1); + INSTANCE_WR(ctx, 0x16D90/4, 0x1); + INSTANCE_WR(ctx, 0x16DB0/4, 0x2); + INSTANCE_WR(ctx, 0x16DD0/4, 0x1); + INSTANCE_WR(ctx, 0x16E10/4, 0x11); + INSTANCE_WR(ctx, 0x16F10/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x16F70/4, 0x4); + INSTANCE_WR(ctx, 0x16FF0/4, 0x11); + INSTANCE_WR(ctx, 0x17010/4, 0x1); + INSTANCE_WR(ctx, 0x17050/4, 0xCF); + INSTANCE_WR(ctx, 0x17070/4, 0xCF); + INSTANCE_WR(ctx, 0x17090/4, 0xCF); + INSTANCE_WR(ctx, 0x171F0/4, 0x1); + INSTANCE_WR(ctx, 0x17210/4, 0x1); + INSTANCE_WR(ctx, 0x17230/4, 0x2); + INSTANCE_WR(ctx, 0x17250/4, 0x1); + INSTANCE_WR(ctx, 0x17270/4, 0x1); + INSTANCE_WR(ctx, 0x17290/4, 0x2); + INSTANCE_WR(ctx, 0x172B0/4, 0x1); + INSTANCE_WR(ctx, 0x172F0/4, 0x1); + INSTANCE_WR(ctx, 0x17310/4, 0x1); + INSTANCE_WR(ctx, 0x17330/4, 0x1); + INSTANCE_WR(ctx, 0x17350/4, 0x1); + INSTANCE_WR(ctx, 0x17370/4, 0x1); + INSTANCE_WR(ctx, 0x17390/4, 0x1); + 
INSTANCE_WR(ctx, 0x173B0/4, 0x1); + INSTANCE_WR(ctx, 0x173D0/4, 0x1); + INSTANCE_WR(ctx, 0x173F0/4, 0x11); + INSTANCE_WR(ctx, 0x174F0/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x17510/4, 0xF); + INSTANCE_WR(ctx, 0x17610/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x17670/4, 0x11); + INSTANCE_WR(ctx, 0x17690/4, 0x1); + INSTANCE_WR(ctx, 0x17710/4, 0x4); + INSTANCE_WR(ctx, 0x177D0/4, 0x1); + INSTANCE_WR(ctx, 0x17870/4, 0x11); + INSTANCE_WR(ctx, 0x17970/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x179F0/4, 0x11); + INSTANCE_WR(ctx, 0x17A10/4, 0x1); + INSTANCE_WR(ctx, 0x17A50/4, 0x1); + INSTANCE_WR(ctx, 0x17A90/4, 0x1); + INSTANCE_WR(ctx, 0x17AD0/4, 0x7FF); + INSTANCE_WR(ctx, 0x17B10/4, 0x1); + INSTANCE_WR(ctx, 0x17B50/4, 0x1); + INSTANCE_WR(ctx, 0x180B0/4, 0x8); + INSTANCE_WR(ctx, 0x180D0/4, 0x8); + INSTANCE_WR(ctx, 0x180F0/4, 0x8); + INSTANCE_WR(ctx, 0x18110/4, 0x8); + INSTANCE_WR(ctx, 0x18130/4, 0x8); + INSTANCE_WR(ctx, 0x18150/4, 0x8); + INSTANCE_WR(ctx, 0x18170/4, 0x8); + INSTANCE_WR(ctx, 0x18190/4, 0x8); + INSTANCE_WR(ctx, 0x181B0/4, 0x11); + INSTANCE_WR(ctx, 0x182B0/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x182D0/4, 0x400); + INSTANCE_WR(ctx, 0x182F0/4, 0x400); + INSTANCE_WR(ctx, 0x18310/4, 0x400); + INSTANCE_WR(ctx, 0x18330/4, 0x400); + INSTANCE_WR(ctx, 0x18350/4, 0x400); + INSTANCE_WR(ctx, 0x18370/4, 0x400); + INSTANCE_WR(ctx, 0x18390/4, 0x400); + INSTANCE_WR(ctx, 0x183B0/4, 0x400); + INSTANCE_WR(ctx, 0x183D0/4, 0x300); + INSTANCE_WR(ctx, 0x183F0/4, 0x300); + INSTANCE_WR(ctx, 0x18410/4, 0x300); + INSTANCE_WR(ctx, 0x18430/4, 0x300); + INSTANCE_WR(ctx, 0x18450/4, 0x300); + INSTANCE_WR(ctx, 0x18470/4, 0x300); + INSTANCE_WR(ctx, 0x18490/4, 0x300); + INSTANCE_WR(ctx, 0x184B0/4, 0x300); + INSTANCE_WR(ctx, 0x184D0/4, 0x1); + INSTANCE_WR(ctx, 0x184F0/4, 0xF); + INSTANCE_WR(ctx, 0x185F0/4, 0x20); + INSTANCE_WR(ctx, 0x18610/4, 0x11); + INSTANCE_WR(ctx, 0x18630/4, 0x100); + INSTANCE_WR(ctx, 0x18670/4, 0x1); + INSTANCE_WR(ctx, 0x186D0/4, 0x40); + INSTANCE_WR(ctx, 0x186F0/4, 0x100); + INSTANCE_WR(ctx, 
0x18730/4, 0x3); + INSTANCE_WR(ctx, 0x187D0/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x18850/4, 0x2); + INSTANCE_WR(ctx, 0x18870/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x189B0/4, 0x1); + INSTANCE_WR(ctx, 0x18A50/4, 0x4); + INSTANCE_WR(ctx, 0x18A90/4, 0x1); + INSTANCE_WR(ctx, 0x18AB0/4, 0x400); + INSTANCE_WR(ctx, 0x18AD0/4, 0x300); + INSTANCE_WR(ctx, 0x18AF0/4, 0x1001); + INSTANCE_WR(ctx, 0x18B70/4, 0x11); + INSTANCE_WR(ctx, 0x18C70/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x18C90/4, 0xF); + INSTANCE_WR(ctx, 0x18F90/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x19010/4, 0x11); + INSTANCE_WR(ctx, 0x19070/4, 0x4); + INSTANCE_WR(ctx, 0x190B0/4, 0x1); + INSTANCE_WR(ctx, 0x190D0/4, 0x1); + INSTANCE_WR(ctx, 0x19150/4, 0x1); + INSTANCE_WR(ctx, 0x191F0/4, 0x1); + INSTANCE_WR(ctx, 0x19230/4, 0x1); + INSTANCE_WR(ctx, 0x192B0/4, 0x2A712488); + INSTANCE_WR(ctx, 0x192F0/4, 0x4085C000); + INSTANCE_WR(ctx, 0x19310/4, 0x40); + INSTANCE_WR(ctx, 0x19330/4, 0x100); + INSTANCE_WR(ctx, 0x19350/4, 0x10100); + INSTANCE_WR(ctx, 0x19370/4, 0x2800000); + INSTANCE_WR(ctx, 0x195D0/4, 0x4E3BFDF); + INSTANCE_WR(ctx, 0x195F0/4, 0x4E3BFDF); + INSTANCE_WR(ctx, 0x19610/4, 0x1); + INSTANCE_WR(ctx, 0x19650/4, 0xFFFF00); + INSTANCE_WR(ctx, 0x19670/4, 0x1); + INSTANCE_WR(ctx, 0x196D0/4, 0xFFFF00); + INSTANCE_WR(ctx, 0x197F0/4, 0x1); + INSTANCE_WR(ctx, 0x19830/4, 0x1); + INSTANCE_WR(ctx, 0x19850/4, 0x30201000); + INSTANCE_WR(ctx, 0x19870/4, 0x70605040); + INSTANCE_WR(ctx, 0x19890/4, 0xB8A89888); + INSTANCE_WR(ctx, 0x198B0/4, 0xF8E8D8C8); + INSTANCE_WR(ctx, 0x198F0/4, 0x1A); + INSTANCE_WR(ctx, 0x19930/4, 0x4); + INSTANCE_WR(ctx, 0x19BF0/4, 0x4); + INSTANCE_WR(ctx, 0x19C10/4, 0x4); + INSTANCE_WR(ctx, 0x19C30/4, 0x608080); + INSTANCE_WR(ctx, 0x19CD0/4, 0x4); + INSTANCE_WR(ctx, 0x19D30/4, 0x4); + INSTANCE_WR(ctx, 0x19D50/4, 0x4); + INSTANCE_WR(ctx, 0x19D70/4, 0x80); + INSTANCE_WR(ctx, 0x19D90/4, 0x4); + INSTANCE_WR(ctx, 0x1A230/4, 0x4); + INSTANCE_WR(ctx, 0x1A250/4, 0x80); + INSTANCE_WR(ctx, 0x1A270/4, 0x4); + INSTANCE_WR(ctx, 0x1A290/4, 
0x3020100); + INSTANCE_WR(ctx, 0x1A2B0/4, 0x3); + INSTANCE_WR(ctx, 0x1A2D0/4, 0x4); + INSTANCE_WR(ctx, 0x1A370/4, 0x4); + INSTANCE_WR(ctx, 0x1A390/4, 0x3); + INSTANCE_WR(ctx, 0x1A410/4, 0x4); + INSTANCE_WR(ctx, 0x2E8B0/4, 0x4); + INSTANCE_WR(ctx, 0x2E8D0/4, 0x3); + INSTANCE_WR(ctx, 0x2EB10/4, 0xF); + INSTANCE_WR(ctx, 0x2EC90/4, 0x4); + INSTANCE_WR(ctx, 0x2ECB0/4, 0xFFFF); + INSTANCE_WR(ctx, 0x2ECD0/4, 0xFFFF); + INSTANCE_WR(ctx, 0x2ECF0/4, 0xFFFF); + INSTANCE_WR(ctx, 0x2ED10/4, 0xFFFF); + INSTANCE_WR(ctx, 0x2EE30/4, 0x1); + INSTANCE_WR(ctx, 0x2EEB0/4, 0x1); + INSTANCE_WR(ctx, 0x2EF70/4, 0x1); + INSTANCE_WR(ctx, 0x2F110/4, 0x1); + INSTANCE_WR(ctx, 0x2F130/4, 0x1); + INSTANCE_WR(ctx, 0x2F150/4, 0x2); + INSTANCE_WR(ctx, 0x2F170/4, 0x1); + INSTANCE_WR(ctx, 0x2F190/4, 0x1); + INSTANCE_WR(ctx, 0x2F1B0/4, 0x2); + INSTANCE_WR(ctx, 0x2F1D0/4, 0x1); + INSTANCE_WR(ctx, 0x2F210/4, 0x11); + INSTANCE_WR(ctx, 0x2F310/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x2F370/4, 0x4); + INSTANCE_WR(ctx, 0x2F3F0/4, 0x11); + INSTANCE_WR(ctx, 0x2F410/4, 0x1); + INSTANCE_WR(ctx, 0x2F450/4, 0xCF); + INSTANCE_WR(ctx, 0x2F470/4, 0xCF); + INSTANCE_WR(ctx, 0x2F490/4, 0xCF); + INSTANCE_WR(ctx, 0x2F5F0/4, 0x1); + INSTANCE_WR(ctx, 0x2F610/4, 0x1); + INSTANCE_WR(ctx, 0x2F630/4, 0x2); + INSTANCE_WR(ctx, 0x2F650/4, 0x1); + INSTANCE_WR(ctx, 0x2F670/4, 0x1); + INSTANCE_WR(ctx, 0x2F690/4, 0x2); + INSTANCE_WR(ctx, 0x2F6B0/4, 0x1); + INSTANCE_WR(ctx, 0x2F6F0/4, 0x1); + INSTANCE_WR(ctx, 0x2F710/4, 0x1); + INSTANCE_WR(ctx, 0x2F730/4, 0x1); + INSTANCE_WR(ctx, 0x2F750/4, 0x1); + INSTANCE_WR(ctx, 0x2F770/4, 0x1); + INSTANCE_WR(ctx, 0x2F790/4, 0x1); + INSTANCE_WR(ctx, 0x2F7B0/4, 0x1); + INSTANCE_WR(ctx, 0x2F7D0/4, 0x1); + INSTANCE_WR(ctx, 0x2F7F0/4, 0x11); + INSTANCE_WR(ctx, 0x2F8F0/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x2F910/4, 0xF); + INSTANCE_WR(ctx, 0x2FA10/4, 0x1FFE67); + INSTANCE_WR(ctx, 0x2FA70/4, 0x11); + INSTANCE_WR(ctx, 0x2FA90/4, 0x1); + INSTANCE_WR(ctx, 0x2FB10/4, 0x4); + INSTANCE_WR(ctx, 0x2FBD0/4, 0x1); + 
INSTANCE_WR(ctx, 0x2FC70/4, 0x11); + INSTANCE_WR(ctx, 0x2FD70/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x2FDF0/4, 0x11); + INSTANCE_WR(ctx, 0x2FE10/4, 0x1); + INSTANCE_WR(ctx, 0x2FE50/4, 0x1); + INSTANCE_WR(ctx, 0x2FE90/4, 0x1); + INSTANCE_WR(ctx, 0x2FED0/4, 0x7FF); + INSTANCE_WR(ctx, 0x2FF10/4, 0x1); + INSTANCE_WR(ctx, 0x2FF50/4, 0x1); + INSTANCE_WR(ctx, 0x304B0/4, 0x8); + INSTANCE_WR(ctx, 0x304D0/4, 0x8); + INSTANCE_WR(ctx, 0x304F0/4, 0x8); + INSTANCE_WR(ctx, 0x30510/4, 0x8); + INSTANCE_WR(ctx, 0x30530/4, 0x8); + INSTANCE_WR(ctx, 0x30550/4, 0x8); + INSTANCE_WR(ctx, 0x30570/4, 0x8); + INSTANCE_WR(ctx, 0x30590/4, 0x8); + INSTANCE_WR(ctx, 0x305B0/4, 0x11); + INSTANCE_WR(ctx, 0x306B0/4, 0xFAC6881); + INSTANCE_WR(ctx, 0x306D0/4, 0x400); + IN... [truncated message content] |
From: <an...@ke...> - 2008-09-24 00:11:34
|
libdrm/intel/intel_bufmgr_fake.c | 134 +++++++++++++++++++++++++++++---------- libdrm/intel/intel_bufmgr_gem.c | 4 - 2 files changed, 104 insertions(+), 34 deletions(-) New commits: commit 2db8e0c8ef8c7a66460fceda129533b364f6418c Author: Eric Anholt <er...@an...> Date: Tue Sep 23 17:06:01 2008 -0700 intel: Allow up to 15 seconds chewing on one buffer before acknowledging -EBUSY. The gltestperf demo in some cases took over seven seconds to make it through one batchbuffer on a GM965. Bug #17004. diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c index e26d46c..a5b183a 100644 --- a/libdrm/intel/intel_bufmgr_fake.c +++ b/libdrm/intel/intel_bufmgr_fake.c @@ -273,7 +273,7 @@ static void _fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq) { struct drm_i915_irq_wait iw; - int hw_seq; + int hw_seq, busy_count = 0; int ret; int kernel_lied; @@ -343,7 +343,17 @@ _fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq) * kernel. The kernel sees hw_seq >= seq and waits for 3 seconds then * returns -EBUSY. This is case C). We should catch this and then return * successfully. + * + * F) Hardware might take a long time on a buffer. + * hw_seq seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = 5. If we call IRQ_WAIT, if sequence 2 through 5 take too + * long, it will return -EBUSY. Batchbuffers in the gltestperf demo were + * seen to take up to 7 seconds. We should catch early -EBUSY return + * and keep trying. */ + do { /* Keep a copy of last_dispatch so that if the wait -EBUSYs because the * hardware didn't catch up in 3 seconds, we can see if it at least made @@ -364,11 +374,18 @@ _fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq) /* Catch case E */ if (ret == -EBUSY && (seq - *bufmgr_fake->last_dispatch > 0x40000000)) ret = 0; + + /* Catch case F: Allow up to 15 seconds chewing on one buffer. 
*/ + if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch)) + busy_count = 0; + else + busy_count++; } while (kernel_lied || ret == -EAGAIN || ret == -EINTR || - (ret == -EBUSY && hw_seq != *bufmgr_fake->last_dispatch)); + (ret == -EBUSY && busy_count < 5)); if (ret != 0) { - drmMsg("%s:%d: Error %d waiting for fence.\n", __FILE__, __LINE__, ret); + drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__, __LINE__, + strerror(-ret)); abort(); } clear_fenced(bufmgr_fake, seq); commit 0dccf017ab629d69fce91e18b013882ecb45f55d Author: Eric Anholt <er...@an...> Date: Tue Sep 23 10:48:39 2008 -0700 intel: Replace wraparound test logic in bufmgr_fake. Again. I'd swapped the operands, so if we weren't in lockstep with the hardware we said the sequence was always passed. Additionally, a race was available that we might have failed at recovering from. Instead, I've replaced the logic with new stuff that should be more robust and not rely on all the parties in userland following the same IRQ_EMIT() == 1 protocol. Also, in a radical departure from past efforts, include a long comment describing the failure modes and how we're working around them. Thanks to haihao for catching the original issue. diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c index 28c7f6b..e26d46c 100644 --- a/libdrm/intel/intel_bufmgr_fake.c +++ b/libdrm/intel/intel_bufmgr_fake.c @@ -166,7 +166,7 @@ typedef struct _bufmgr_fake { /** Driver-supplied argument to driver callbacks */ void *driver_priv; /* Pointer to kernel-updated sarea data for the last completed user irq */ - volatile unsigned int *last_dispatch; + volatile int *last_dispatch; int fd; @@ -264,61 +264,114 @@ _fence_emit_internal(dri_bufmgr_fake *bufmgr_fake) abort(); } - /* The kernel implementation of IRQ_WAIT is broken for wraparound, and has - * been since it was first introduced. 
It only checks for - * completed_seq >= seq, and thus returns success early for wrapped irq - * values if the CPU wins a race. - * - * We have to do it up front at emit when we discover wrap, so that another - * client can't race (after we drop the lock) to emit and wait and fail. - */ - if (seq == 0 || seq == 1) { - drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_FLUSH, &ie, sizeof(ie)); - } - DBG("emit 0x%08x\n", seq); bufmgr_fake->last_fence = seq; return bufmgr_fake->last_fence; } static void -_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie) +_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq) { struct drm_i915_irq_wait iw; - unsigned int last_dispatch; + int hw_seq; int ret; + int kernel_lied; if (bufmgr_fake->fence_wait != NULL) { - bufmgr_fake->fence_wait(cookie, bufmgr_fake->fence_priv); + bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv); return; } DBG("wait 0x%08x\n", iw.irq_seq); - /* The kernel implementation of IRQ_WAIT is broken for wraparound, and has - * been since it was first introduced. It only checks for - * completed_seq >= seq, and thus never returns for pre-wrapped irq values - * if the GPU wins the race. + iw.irq_seq = seq; + + /* The kernel IRQ_WAIT implementation is all sorts of broken. + * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit unsigned + * range. + * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit + * signed range. + * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit + * signed range. + * 4) It returns -EBUSY in 3 seconds even if the hardware is still + * successfully chewing through buffers. + * + * Assume that in userland we treat sequence numbers as ints, which makes + * some of the comparisons convenient, since the sequence numbers are + * all postive signed integers. + * + * From this we get several cases we need to handle. Here's a timeline. 
+ * 0x2 0x7 0x7ffffff8 0x7ffffffd + * | | | | + * ------------------------------------------------------------------- + * + * A) Normal wait for hw to catch up + * hw_seq seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = 5. If we call IRQ_WAIT, it will wait for hw to catch up. + * + * B) Normal wait for a sequence number that's already passed. + * seq hw_seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = -5. If we call IRQ_WAIT, it returns 0 quickly. * - * So, check if it looks like a pre-wrapped value and just return success. + * C) Hardware has already wrapped around ahead of us + * hw_seq seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = 0x80000000 - 5. If we called IRQ_WAIT, it would wait + * for hw_seq >= seq, which may never occur. Thus, we want to catch this + * in userland and return 0. + * + * D) We've wrapped around ahead of the hardware. + * seq hw_seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = -(0x80000000 - 5). If we called IRQ_WAIT, it would return + * 0 quickly because hw_seq >= seq, even though the hardware isn't caught up. + * Thus, we need to catch this early return in userland and bother the + * kernel until the hardware really does catch up. + * + * E) Hardware might wrap after we test in userland. + * hw_seq seq + * | | + * ------------------------------------------------------------------- + * seq - hw_seq = 5. If we call IRQ_WAIT, it will likely see seq >= hw_seq + * and wait. However, suppose hw_seq wraps before we make it into the + * kernel. The kernel sees hw_seq >= seq and waits for 3 seconds then + * returns -EBUSY. This is case C). We should catch this and then return + * successfully. 
*/ - if (*bufmgr_fake->last_dispatch - cookie > 0x4000000) - return; + do { + /* Keep a copy of last_dispatch so that if the wait -EBUSYs because the + * hardware didn't catch up in 3 seconds, we can see if it at least made + * progress and retry. + */ + hw_seq = *bufmgr_fake->last_dispatch; - iw.irq_seq = cookie; + /* Catch case C */ + if (seq - hw_seq > 0x40000000) + return; - do { - last_dispatch = *bufmgr_fake->last_dispatch; ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT, &iw, sizeof(iw)); - } while (ret == -EAGAIN || ret == -EINTR || - (ret == -EBUSY && last_dispatch != *bufmgr_fake->last_dispatch)); + /* Catch case D */ + kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch < + -0x40000000); + + /* Catch case E */ + if (ret == -EBUSY && (seq - *bufmgr_fake->last_dispatch > 0x40000000)) + ret = 0; + } while (kernel_lied || ret == -EAGAIN || ret == -EINTR || + (ret == -EBUSY && hw_seq != *bufmgr_fake->last_dispatch)); if (ret != 0) { drmMsg("%s:%d: Error %d waiting for fence.\n", __FILE__, __LINE__, ret); abort(); } - clear_fenced(bufmgr_fake, cookie); + clear_fenced(bufmgr_fake, seq); } static int @@ -1312,7 +1365,7 @@ void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr, { dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr; - bufmgr_fake->last_dispatch = last_dispatch; + bufmgr_fake->last_dispatch = (volatile int *)last_dispatch; } dri_bufmgr * @@ -1349,7 +1402,7 @@ intel_bufmgr_fake_init(int fd, bufmgr_fake->bufmgr.debug = 0; bufmgr_fake->fd = fd; - bufmgr_fake->last_dispatch = last_dispatch; + bufmgr_fake->last_dispatch = (volatile int *)last_dispatch; return &bufmgr_fake->bufmgr; } commit 1b3abe62b5751d0514d57aa850e584dca7dfc23e Author: Eric Anholt <er...@an...> Date: Tue Sep 23 10:47:21 2008 -0700 intel: Do strerror on errno, not on the -1 return value from ioctl. 
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 97e387f..cd36cdc 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -357,7 +357,7 @@ intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name, ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg); if (ret != 0) { fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n", - name, handle, strerror(-ret)); + name, handle, strerror(errno)); free(bo_gem); return NULL; } @@ -401,7 +401,7 @@ dri_gem_bo_free(dri_bo *bo) if (ret != 0) { fprintf(stderr, "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n", - bo_gem->gem_handle, bo_gem->name, strerror(-ret)); + bo_gem->gem_handle, bo_gem->name, strerror(errno)); } free(bo); } |
From: <rn...@ke...> - 2008-10-04 14:39:51
|
bsd-core/drm_bufs.c | 7 ++++--- bsd-core/i915_drv.c | 4 ++-- bsd-core/mach64_drv.c | 4 ++-- bsd-core/mga_drv.c | 4 ++-- bsd-core/r128_drv.c | 4 ++-- bsd-core/radeon_drv.c | 4 ++-- bsd-core/savage_drv.c | 4 ++-- bsd-core/sis_drv.c | 4 ++-- bsd-core/tdfx_drv.c | 4 ++-- bsd-core/via_drv.c | 4 ++-- 10 files changed, 22 insertions(+), 21 deletions(-) New commits: commit 60cf3a4db4ab8ee81aca104624e89caf5587419b Author: Robert Noland <rn...@2h...> Date: Fri Oct 3 14:11:20 2008 -0400 [FreeBSD] Don't explicitly bzero driver softc. This is already handled for us. Suggested by John Baldwin diff --git a/bsd-core/i915_drv.c b/bsd-core/i915_drv.c index b2658f0..8770321 100644 --- a/bsd-core/i915_drv.c +++ b/bsd-core/i915_drv.c @@ -109,8 +109,6 @@ i915_attach(device_t nbdev) { struct drm_device *dev = device_get_softc(nbdev); - bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_WAITOK | M_ZERO); diff --git a/bsd-core/mach64_drv.c b/bsd-core/mach64_drv.c index dcf35bb..03a533a 100644 --- a/bsd-core/mach64_drv.c +++ b/bsd-core/mach64_drv.c @@ -83,8 +83,6 @@ mach64_attach(device_t nbdev) { struct drm_device *dev = device_get_softc(nbdev); - bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_WAITOK | M_ZERO); diff --git a/bsd-core/mga_drv.c b/bsd-core/mga_drv.c index dfb4b71..ae3675c 100644 --- a/bsd-core/mga_drv.c +++ b/bsd-core/mga_drv.c @@ -127,8 +127,6 @@ mga_attach(device_t nbdev) { struct drm_device *dev = device_get_softc(nbdev); - bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_WAITOK | M_ZERO); diff --git a/bsd-core/r128_drv.c b/bsd-core/r128_drv.c index f325114..f239ea3 100644 --- a/bsd-core/r128_drv.c +++ b/bsd-core/r128_drv.c @@ -82,8 +82,6 @@ r128_attach(device_t nbdev) { struct drm_device *dev = device_get_softc(nbdev); - bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), 
M_DRM, M_WAITOK | M_ZERO); diff --git a/bsd-core/radeon_drv.c b/bsd-core/radeon_drv.c index 6b90dd6..ab5968b 100644 --- a/bsd-core/radeon_drv.c +++ b/bsd-core/radeon_drv.c @@ -87,8 +87,6 @@ radeon_attach(device_t nbdev) { struct drm_device *dev = device_get_softc(nbdev); - bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_WAITOK | M_ZERO); diff --git a/bsd-core/savage_drv.c b/bsd-core/savage_drv.c index 7f406e0..5cf2d61 100644 --- a/bsd-core/savage_drv.c +++ b/bsd-core/savage_drv.c @@ -73,8 +73,6 @@ savage_attach(device_t nbdev) { struct drm_device *dev = device_get_softc(nbdev); - bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_WAITOK | M_ZERO); diff --git a/bsd-core/sis_drv.c b/bsd-core/sis_drv.c index c69a093..55a6231 100644 --- a/bsd-core/sis_drv.c +++ b/bsd-core/sis_drv.c @@ -67,8 +67,6 @@ sis_attach(device_t nbdev) { struct drm_device *dev = device_get_softc(nbdev); - bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_WAITOK | M_ZERO); diff --git a/bsd-core/tdfx_drv.c b/bsd-core/tdfx_drv.c index 8c10ea8..6195256 100644 --- a/bsd-core/tdfx_drv.c +++ b/bsd-core/tdfx_drv.c @@ -69,8 +69,6 @@ tdfx_attach(device_t nbdev) { struct drm_device *dev = device_get_softc(nbdev); - bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_WAITOK | M_ZERO); diff --git a/bsd-core/via_drv.c b/bsd-core/via_drv.c index d2a1e67..d16efc4 100644 --- a/bsd-core/via_drv.c +++ b/bsd-core/via_drv.c @@ -80,8 +80,6 @@ via_attach(device_t nbdev) { struct drm_device *dev = device_get_softc(nbdev); - bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_WAITOK | M_ZERO); commit 9c0ce38df3d9026785155d06fc62bdd7acaf8bf0 Author: Robert Noland <rn...@2h...> Date: Fri Oct 3 14:05:45 2008 -0400 [FreeBSD] Use M_WAITOK when allocating driver 
memory. We don't explicitly check for error here and M_WAITOK will just put the process to sleep waiting on resources to become available. Suggested by John Baldwin diff --git a/bsd-core/i915_drv.c b/bsd-core/i915_drv.c index def35f0..b2658f0 100644 --- a/bsd-core/i915_drv.c +++ b/bsd-core/i915_drv.c @@ -111,7 +111,9 @@ i915_attach(device_t nbdev) bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO); + dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, + M_WAITOK | M_ZERO); + i915_configure(dev); return drm_attach(nbdev, i915_pciidlist); diff --git a/bsd-core/mach64_drv.c b/bsd-core/mach64_drv.c index adb83d3..dcf35bb 100644 --- a/bsd-core/mach64_drv.c +++ b/bsd-core/mach64_drv.c @@ -85,7 +85,9 @@ mach64_attach(device_t nbdev) bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO); + dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, + M_WAITOK | M_ZERO); + mach64_configure(dev); return drm_attach(nbdev, mach64_pciidlist); diff --git a/bsd-core/mga_drv.c b/bsd-core/mga_drv.c index 5554236..dfb4b71 100644 --- a/bsd-core/mga_drv.c +++ b/bsd-core/mga_drv.c @@ -129,7 +129,9 @@ mga_attach(device_t nbdev) bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO); + dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, + M_WAITOK | M_ZERO); + mga_configure(dev); return drm_attach(nbdev, mga_pciidlist); diff --git a/bsd-core/r128_drv.c b/bsd-core/r128_drv.c index 4c20af4..f325114 100644 --- a/bsd-core/r128_drv.c +++ b/bsd-core/r128_drv.c @@ -84,7 +84,9 @@ r128_attach(device_t nbdev) bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO); + dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, + M_WAITOK | M_ZERO); + r128_configure(dev); return drm_attach(nbdev, r128_pciidlist); diff --git 
a/bsd-core/radeon_drv.c b/bsd-core/radeon_drv.c index 8ab3e99..6b90dd6 100644 --- a/bsd-core/radeon_drv.c +++ b/bsd-core/radeon_drv.c @@ -89,7 +89,9 @@ radeon_attach(device_t nbdev) bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO); + dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, + M_WAITOK | M_ZERO); + radeon_configure(dev); return drm_attach(nbdev, radeon_pciidlist); diff --git a/bsd-core/savage_drv.c b/bsd-core/savage_drv.c index 35fcdfa..7f406e0 100644 --- a/bsd-core/savage_drv.c +++ b/bsd-core/savage_drv.c @@ -75,7 +75,9 @@ savage_attach(device_t nbdev) bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO); + dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, + M_WAITOK | M_ZERO); + savage_configure(dev); return drm_attach(nbdev, savage_pciidlist); diff --git a/bsd-core/sis_drv.c b/bsd-core/sis_drv.c index 2ae1bff..c69a093 100644 --- a/bsd-core/sis_drv.c +++ b/bsd-core/sis_drv.c @@ -69,7 +69,9 @@ sis_attach(device_t nbdev) bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO); + dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, + M_WAITOK | M_ZERO); + sis_configure(dev); return drm_attach(nbdev, sis_pciidlist); diff --git a/bsd-core/tdfx_drv.c b/bsd-core/tdfx_drv.c index 44948b5..8c10ea8 100644 --- a/bsd-core/tdfx_drv.c +++ b/bsd-core/tdfx_drv.c @@ -71,7 +71,9 @@ tdfx_attach(device_t nbdev) bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO); + dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, + M_WAITOK | M_ZERO); + tdfx_configure(dev); return drm_attach(nbdev, tdfx_pciidlist); diff --git a/bsd-core/via_drv.c b/bsd-core/via_drv.c index e5f7d49..d2a1e67 100644 --- a/bsd-core/via_drv.c +++ b/bsd-core/via_drv.c @@ -82,7 +82,9 @@ via_attach(device_t nbdev) 
bzero(dev, sizeof(struct drm_device)); - dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO); + dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, + M_WAITOK | M_ZERO); + via_configure(dev); return drm_attach(nbdev, via_pciidlist); commit 4c92abfa8d0b9f2ab14e6b915bdffd47fd2e2474 Author: Robert Noland <rn...@2h...> Date: Fri Oct 3 13:56:50 2008 -0400 [FreeBSD] Do a bit of optimization on drm_order() diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c index c9b5dc4..60f57cf 100644 --- a/bsd-core/drm_bufs.c +++ b/bsd-core/drm_bufs.c @@ -1099,11 +1099,12 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) int drm_order(unsigned long size) { int order; - unsigned long tmp; - for (order = 0, tmp = size; tmp >>= 1; ++order); + if (size == 0) + return 0; - if (size & ~(1 << order)) + order = ffsl(size) - 1; + if (size & ~(1ul << order)) ++order; return order; |
From: <an...@ke...> - 2008-12-15 00:53:57
|
libdrm/intel/intel_bufmgr_gem.c | 129 +++++++++++++++++----------------------- 1 file changed, 58 insertions(+), 71 deletions(-) New commits: commit c86d431fe6174b1c2de531929213ea7dbd92326d Author: Eric Anholt <er...@an...> Date: Sun Dec 14 14:39:24 2008 -0800 intel: don't skip set_domain on mapping of shared buffers. diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 5da1f6b..e5a1375 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -545,7 +545,7 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable) bo_gem->virtual); bo->virtual = bo_gem->virtual; - if (!bo_gem->swrast) { + if (bo_gem->global_name != 0 || !bo_gem->swrast) { set_domain.handle = bo_gem->gem_handle; set_domain.read_domains = I915_GEM_DOMAIN_CPU; if (write_enable) commit cebbd2edb54db1780a57b3873d1d3e40cb20043f Author: Eric Anholt <er...@an...> Date: Sun Dec 14 14:35:48 2008 -0800 intel: don't let named buffers into the BO cache. We wouldn't want some remaining 3D rendering to scribble on our batchbuffer. diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index c29368d..5da1f6b 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -478,12 +478,13 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo) bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size); /* Put the buffer into our internal cache for reuse if we can. */ - if (bucket != NULL && + if (bo_gem->global_name == 0 && + bucket != NULL && (bucket->max_entries == -1 || (bucket->max_entries > 0 && bucket->num_entries < bucket->max_entries))) { - bo_gem->name = 0; + bo_gem->name = NULL; bo_gem->validate_index = -1; bo_gem->relocs = NULL; bo_gem->reloc_target_bo = NULL; commit 782316801beeaf237af8272c41af93c96c708ac4 Author: Eric Anholt <er...@an...> Date: Sun Dec 14 14:32:09 2008 -0800 intel: Remove the mapped flag, which is adequately covered by bo_gem->virtual. 
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index e681eee..c29368d 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -106,7 +106,6 @@ struct _drm_intel_bo_gem { int refcount; /** Boolean whether the mmap ioctl has been called for this buffer yet. */ - int mapped; uint32_t gem_handle; const char *name; @@ -134,7 +133,7 @@ struct _drm_intel_bo_gem { drm_intel_bo **reloc_target_bo; /** Number of entries in relocs */ int reloc_count; - /** Mapped address for the buffer */ + /** Mapped address for the buffer, saved across map/unmap cycles */ void *virtual; /** free list */ @@ -441,7 +440,7 @@ drm_intel_gem_bo_free(drm_intel_bo *bo) struct drm_gem_close close; int ret; - if (bo_gem->mapped) + if (bo_gem->virtual) munmap (bo_gem->virtual, bo_gem->bo.size); /* Close this object */ @@ -523,32 +522,27 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable) /* Allow recursive mapping. Mesa may recursively map buffers with * nested display loops. 
*/ - if (!bo_gem->mapped) { - - assert(bo->virtual == NULL); - + if (!bo_gem->virtual) { + struct drm_i915_gem_mmap mmap_arg; + DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name); - - if (bo_gem->virtual == NULL) { - struct drm_i915_gem_mmap mmap_arg; - - memset(&mmap_arg, 0, sizeof(mmap_arg)); - mmap_arg.handle = bo_gem->gem_handle; - mmap_arg.offset = 0; - mmap_arg.size = bo->size; - ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg); - if (ret != 0) { - fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n", - __FILE__, __LINE__, - bo_gem->gem_handle, bo_gem->name, strerror(errno)); - } - bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr; + + memset(&mmap_arg, 0, sizeof(mmap_arg)); + mmap_arg.handle = bo_gem->gem_handle; + mmap_arg.offset = 0; + mmap_arg.size = bo->size; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg); + if (ret != 0) { + fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, bo_gem->name, strerror(errno)); } - bo->virtual = bo_gem->virtual; + bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr; bo_gem->swrast = 0; - bo_gem->mapped = 1; - DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual); } + DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, + bo_gem->virtual); + bo->virtual = bo_gem->virtual; if (!bo_gem->swrast) { set_domain.handle = bo_gem->gem_handle; @@ -583,55 +577,47 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo) pthread_mutex_lock(&bufmgr_gem->lock); - /* Allow recursive mapping. Mesa may recursively map buffers with - * nested display loops. - */ - if (!bo_gem->mapped) { - - assert(bo->virtual == NULL); + /* Get a mapping of the buffer if we haven't before. 
*/ + if (bo_gem->virtual == NULL) { + struct drm_i915_gem_mmap_gtt mmap_arg; DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name); - if (bo_gem->virtual == NULL) { - struct drm_i915_gem_mmap_gtt mmap_arg; - - memset(&mmap_arg, 0, sizeof(mmap_arg)); - mmap_arg.handle = bo_gem->gem_handle; - - /* Get the fake offset back... */ - ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, - &mmap_arg); - if (ret != 0) { - fprintf(stderr, - "%s:%d: Error preparing buffer map %d (%s): %s .\n", - __FILE__, __LINE__, - bo_gem->gem_handle, bo_gem->name, - strerror(errno)); - pthread_mutex_unlock(&bufmgr_gem->lock); - return ret; - } - - /* and mmap it */ - bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE, - MAP_SHARED, bufmgr_gem->fd, - mmap_arg.offset); - if (bo_gem->virtual == MAP_FAILED) { - fprintf(stderr, - "%s:%d: Error mapping buffer %d (%s): %s .\n", - __FILE__, __LINE__, - bo_gem->gem_handle, bo_gem->name, - strerror(errno)); - pthread_mutex_unlock(&bufmgr_gem->lock); - return errno; - } + memset(&mmap_arg, 0, sizeof(mmap_arg)); + mmap_arg.handle = bo_gem->gem_handle; + + /* Get the fake offset back... 
*/ + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); + if (ret != 0) { + fprintf(stderr, + "%s:%d: Error preparing buffer map %d (%s): %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, bo_gem->name, + strerror(errno)); + pthread_mutex_unlock(&bufmgr_gem->lock); + return ret; } - bo->virtual = bo_gem->virtual; - bo_gem->mapped = 1; - DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, - bo_gem->virtual); + /* and mmap it */ + bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE, + MAP_SHARED, bufmgr_gem->fd, + mmap_arg.offset); + if (bo_gem->virtual == MAP_FAILED) { + fprintf(stderr, + "%s:%d: Error mapping buffer %d (%s): %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, bo_gem->name, + strerror(errno)); + pthread_mutex_unlock(&bufmgr_gem->lock); + return errno; + } } + bo->virtual = bo_gem->virtual; + + DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, + bo_gem->virtual); + /* Now move it to the GTT domain so that the CPU caches are flushed */ set_domain.handle = bo_gem->gem_handle; set_domain.read_domains = I915_GEM_DOMAIN_GTT; @@ -662,7 +648,7 @@ drm_intel_gem_bo_unmap(drm_intel_bo *bo) if (bo == NULL) return 0; - assert(bo_gem->mapped); + assert(bo_gem->virtual != NULL); pthread_mutex_lock(&bufmgr_gem->lock); if (bo_gem->swrast) { |