From: Maarten M. <mad...@gm...> - 2008-10-30 23:33:57
On Thu, Oct 30, 2008 at 10:08 PM, Jesse Barnes <jb...@vi...> wrote: > This commit adds the core mode setting routines for use by DRM drivers to > manage outputs and displays. Originally based on the X.Org Randr 1.2 > implementation, the code has since been heavily changed by Dave Airlie > with contributions by Jesse Barnes, Jakob Bornecrantz and others. > > This one should probably be split up a bit; I think the TTM stuff in > particular could be factored out fairly easily. > > diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c > index 738fd0f..31ce044 100644 > --- a/arch/x86/mm/pat.c > +++ b/arch/x86/mm/pat.c > @@ -11,6 +11,7 @@ > #include <linux/bootmem.h> > #include <linux/debugfs.h> > #include <linux/kernel.h> > +#include <linux/module.h> > #include <linux/gfp.h> > #include <linux/mm.h> > #include <linux/fs.h> > @@ -29,6 +30,7 @@ > > #ifdef CONFIG_X86_PAT > int __read_mostly pat_enabled = 1; > +EXPORT_SYMBOL_GPL(pat_enabled); > > void __cpuinit pat_disable(char *reason) > { > diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig > index a8b33c2..6723182 100644 > --- a/drivers/gpu/drm/Kconfig > +++ b/drivers/gpu/drm/Kconfig > @@ -41,6 +41,14 @@ config DRM_RADEON > > If M is selected, the module will be called radeon. > > +config DRM_RADEON_KMS > + bool "Enable modesetting on radeon by default" > + depends on DRM_RADEON > + help > + Choose this option if you want kernel modesetting enabled by default, > + and you have a new enough userspace to support this. Running old > + userspaces with this enabled will cause pain. > + > config DRM_I810 > tristate "Intel I810" > depends on DRM && AGP && AGP_INTEL > @@ -76,6 +84,15 @@ config DRM_I915 > > endchoice > > +config DRM_I915_KMS > + bool "Enable modesetting on intel by default" > + depends on DRM_I915 > + help > + Choose this option if you want kernel modesetting enabled by default, > + and you have a new enough userspace to support this. Running old > + userspaces with this enabled will cause pain. 
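
The two *_KMS bools above only pick the default; in practice a Kconfig default like this gets paired with a runtime override, so people bitten by the "old userspace will cause pain" case can still boot with modesetting off. A minimal sketch of the usual wiring follows; note the "modeset" parameter and the use_kms() helper are invented names for illustration, not something this patch adds:

#include <linux/module.h>

/* -1 means "honour the Kconfig default"; 0/1 force it off/on at load time. */
static int modeset __read_mostly = -1;
module_param(modeset, int, 0400);
MODULE_PARM_DESC(modeset, "Enable kernel modesetting (-1 = Kconfig default)");

static bool use_kms(void)
{
	if (modeset >= 0)
		return modeset != 0;	/* explicit boot-time override */
#ifdef CONFIG_DRM_I915_KMS
	return true;			/* default on, per Kconfig */
#else
	return false;
#endif
}
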
> + > + > config DRM_MGA > tristate "Matrox g200/g400" > depends on DRM > diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile > index 74da994..48567a9 100644 > --- a/drivers/gpu/drm/Makefile > +++ b/drivers/gpu/drm/Makefile > @@ -9,7 +9,9 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ > drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ > drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ > drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ > - drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o > + drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ > + drm_fence.o drm_bo.o drm_ttm.o drm_bo_move.o \ > + drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o > > drm-$(CONFIG_COMPAT) += drm_ioc32.o > > diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c > index c533d0c..adc57dd 100644 > --- a/drivers/gpu/drm/ati_pcigart.c > +++ b/drivers/gpu/drm/ati_pcigart.c > @@ -34,9 +34,55 @@ > #include "drmP.h" > > # define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ > +# define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1)) > > -static int drm_ati_alloc_pcigart_table(struct drm_device *dev, > - struct drm_ati_pcigart_info *gart_info) > +#define ATI_PCIE_WRITE 0x4 > +#define ATI_PCIE_READ 0x8 > + > +static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info, dma_addr_t > addr, volatile u32 *pci_gart) > +{ > + u32 page_base; > + > + page_base = (u32)addr & ATI_PCIGART_PAGE_MASK; > + switch(gart_info->gart_reg_if) { > + case DRM_ATI_GART_IGP: > + page_base |= (upper_32_bits(addr) & 0xff) << 4; > + page_base |= 0xc; > + break; > + case DRM_ATI_GART_PCIE: > + page_base >>= 8; > + page_base |= (upper_32_bits(addr) & 0xff) << 24; > + page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE; > + break; > + default: > + case DRM_ATI_GART_PCI: > + break; > + } > + *pci_gart = cpu_to_le32(page_base); > +} > + > +static __inline__ dma_addr_t gart_get_page_from_table(struct drm_ati_pcigart_info *gart_info, > volatile u32 *pci_gart) > +{ > + dma_addr_t retval; > + switch(gart_info->gart_reg_if) { > + case DRM_ATI_GART_IGP: > + retval = (*pci_gart & ATI_PCIGART_PAGE_MASK); > + retval += (((*pci_gart & 0xf0) >> 4) << 16) << 16; > + break; > + case DRM_ATI_GART_PCIE: > + retval = (*pci_gart & ~0xc); > + retval <<= 8; > + break; > + case DRM_ATI_GART_PCI: > + retval = *pci_gart; > + break; > + } > + > + return retval; > +} > + > +int drm_ati_alloc_pcigart_table(struct drm_device *dev, > + struct drm_ati_pcigart_info *gart_info) > { > gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, > PAGE_SIZE, > @@ -44,12 +90,25 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev, > if (gart_info->table_handle == NULL) > return -ENOMEM; > > +#ifdef CONFIG_X86 > + /* IGPs only exist on x86 in any case */ > + if (gart_info->gart_reg_if == DRM_ATI_GART_IGP) > + set_memory_uc((unsigned long)gart_info->table_handle->vaddr, gart_info->table_size >> > PAGE_SHIFT); > +#endif > + > + memset(gart_info->table_handle->vaddr, 0, gart_info->table_size); > return 0; > } > +EXPORT_SYMBOL(drm_ati_alloc_pcigart_table); > > static void drm_ati_free_pcigart_table(struct drm_device *dev, > struct drm_ati_pcigart_info *gart_info) > { > +#ifdef CONFIG_X86 > + /* IGPs only exist on x86 in any case */ > + if (gart_info->gart_reg_if == DRM_ATI_GART_IGP) > + set_memory_wb((unsigned long)gart_info->table_handle->vaddr, gart_info->table_size >> > PAGE_SHIFT); > +#endif > drm_pci_free(dev, gart_info->table_handle); > gart_info->table_handle = NULL; > } > @@ 
-63,7 +122,6 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info > > /* we need to support large memory configurations */ > if (!entry) { > - DRM_ERROR("no scatter/gather memory!\n"); > return 0; > } > > @@ -98,17 +156,14 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga > struct drm_sg_mem *entry = dev->sg; > void *address = NULL; > unsigned long pages; > - u32 *pci_gart, page_base; > + u32 *pci_gart; > dma_addr_t bus_address = 0; > int i, j, ret = 0; > int max_pages; > + dma_addr_t entry_addr; > > - if (!entry) { > - DRM_ERROR("no scatter/gather memory!\n"); > - goto done; > - } > > - if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { > + if (gart_info->gart_table_location == DRM_ATI_GART_MAIN && gart_info->table_handle == NULL) { > DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); > > ret = drm_ati_alloc_pcigart_table(dev, gart_info); > @@ -116,15 +171,19 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga > DRM_ERROR("cannot allocate PCI GART page!\n"); > goto done; > } > + } > > + if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { > address = gart_info->table_handle->vaddr; > bus_address = gart_info->table_handle->busaddr; > } else { > address = gart_info->addr; > bus_address = gart_info->bus_addr; > - DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n", > - (unsigned long long)bus_address, > - (unsigned long)address); > + } > + > + if (!entry) { > + DRM_ERROR("no scatter/gather memory!\n"); > + goto done; > } > > pci_gart = (u32 *) address; > @@ -133,8 +192,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga > pages = (entry->pages <= max_pages) > ? entry->pages : max_pages; > > - memset(pci_gart, 0, max_pages * sizeof(u32)); > - > for (i = 0; i < pages; i++) { > /* we need to support large memory configurations */ > entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i], > @@ -146,32 +203,18 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga > bus_address = 0; > goto done; > } > - page_base = (u32) entry->busaddr[i]; > > + entry_addr = entry->busaddr[i]; > for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) { > - switch(gart_info->gart_reg_if) { > - case DRM_ATI_GART_IGP: > - *pci_gart = cpu_to_le32((page_base) | 0xc); > - break; > - case DRM_ATI_GART_PCIE: > - *pci_gart = cpu_to_le32((page_base >> 8) | 0xc); > - break; > - default: > - case DRM_ATI_GART_PCI: > - *pci_gart = cpu_to_le32(page_base); > - break; > - } > + gart_insert_page_into_table(gart_info, entry_addr, pci_gart); > pci_gart++; > - page_base += ATI_PCIGART_PAGE_SIZE; > + entry_addr += ATI_PCIGART_PAGE_SIZE; > } > } > + > ret = 1; > > -#if defined(__i386__) || defined(__x86_64__) > - wbinvd(); > -#else > mb(); > -#endif > > done: > gart_info->addr = address; > @@ -179,3 +222,142 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga > return ret; > } > EXPORT_SYMBOL(drm_ati_pcigart_init); > + > +static int ati_pcigart_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) > +{ > + return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 
0 : 1); > +} > + > +static int ati_pcigart_populate(struct drm_ttm_backend *backend, > + unsigned long num_pages, > + struct page **pages, > + struct page *dummy_read_page) > +{ > + struct ati_pcigart_ttm_backend *atipci_be = > + container_of(backend, struct ati_pcigart_ttm_backend, backend); > + > + atipci_be->pages = pages; > + atipci_be->num_pages = num_pages; > + atipci_be->populated = 1; > + return 0; > +} > + > +static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend, > + struct drm_bo_mem_reg *bo_mem) > +{ > + struct ati_pcigart_ttm_backend *atipci_be = > + container_of(backend, struct ati_pcigart_ttm_backend, backend); > + off_t j; > + int i; > + struct drm_ati_pcigart_info *info = atipci_be->gart_info; > + volatile u32 *pci_gart; > + dma_addr_t offset = bo_mem->mm_node->start; > + dma_addr_t page_base; > + > + pci_gart = info->addr; > + > + j = offset; > + while (j < (offset + atipci_be->num_pages)) { > + if (gart_get_page_from_table(info, pci_gart + j)) > + return -EBUSY; > + j++; > + } > + > + for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) { > + struct page *cur_page = atipci_be->pages[i]; > + /* write value */ > + page_base = page_to_phys(cur_page); > + gart_insert_page_into_table(info, page_base, pci_gart + j); > + } > + > + mb(); > + atipci_be->gart_flush_fn(atipci_be->dev); > + > + atipci_be->bound = 1; > + atipci_be->offset = offset; > + /* need to traverse table and add entries */ > + DRM_DEBUG("\n"); > + return 0; > +} > + > +static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend) > +{ > + struct ati_pcigart_ttm_backend *atipci_be = > + container_of(backend, struct ati_pcigart_ttm_backend, backend); > + struct drm_ati_pcigart_info *info = atipci_be->gart_info; > + unsigned long offset = atipci_be->offset; > + int i; > + off_t j; > + volatile u32 *pci_gart = info->addr; > + > + if (atipci_be->bound != 1) > + return -EINVAL; > + > + for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) { > + *(pci_gart + j) = 0; > + } > + > + mb(); > + atipci_be->gart_flush_fn(atipci_be->dev); > + atipci_be->bound = 0; > + atipci_be->offset = 0; > + return 0; > +} > + > +static void ati_pcigart_clear_ttm(struct drm_ttm_backend *backend) > +{ > + struct ati_pcigart_ttm_backend *atipci_be = > + container_of(backend, struct ati_pcigart_ttm_backend, backend); > + > + DRM_DEBUG("\n"); > + if (atipci_be->pages) { > + backend->func->unbind(backend); > + atipci_be->pages = NULL; > + > + } > + atipci_be->num_pages = 0; > +} > + > +static void ati_pcigart_destroy_ttm(struct drm_ttm_backend *backend) > +{ > + struct ati_pcigart_ttm_backend *atipci_be; > + if (backend) { > + DRM_DEBUG("\n"); > + atipci_be = container_of(backend, struct ati_pcigart_ttm_backend, backend); > + if (atipci_be) { > + if (atipci_be->pages) { > + backend->func->clear(backend); > + } > + drm_ctl_free(atipci_be, sizeof(*atipci_be), DRM_MEM_TTM); > + } > + } > +} > + > +static struct drm_ttm_backend_func ati_pcigart_ttm_backend = > +{ > + .needs_ub_cache_adjust = ati_pcigart_needs_unbind_cache_adjust, > + .populate = ati_pcigart_populate, > + .clear = ati_pcigart_clear_ttm, > + .bind = ati_pcigart_bind_ttm, > + .unbind = ati_pcigart_unbind_ttm, > + .destroy = ati_pcigart_destroy_ttm, > +}; > + > +struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info > *info, void (*gart_flush_fn)(struct drm_device *dev)) > +{ > + struct ati_pcigart_ttm_backend *atipci_be; > + > + atipci_be = drm_ctl_calloc(1, sizeof (*atipci_be), DRM_MEM_TTM); > + if (!atipci_be) > + 
return NULL; > + > + atipci_be->populated = 0; > + atipci_be->backend.func = &ati_pcigart_ttm_backend; > +// atipci_be->backend.mem_type = DRM_BO_MEM_TT; > + atipci_be->gart_info = info; > + atipci_be->gart_flush_fn = gart_flush_fn; > + atipci_be->dev = dev; > + > + return &atipci_be->backend; > +} > +EXPORT_SYMBOL(ati_pcigart_init_ttm); > diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c > index 3d33b82..e048aa2 100644 > --- a/drivers/gpu/drm/drm_agpsupport.c > +++ b/drivers/gpu/drm/drm_agpsupport.c > @@ -496,6 +496,177 @@ drm_agp_bind_pages(struct drm_device *dev, > } > EXPORT_SYMBOL(drm_agp_bind_pages); > > +/* > + * AGP ttm backend interface. > + */ > + > +#ifndef AGP_USER_TYPES > +#define AGP_USER_TYPES (1 << 16) > +#define AGP_USER_MEMORY (AGP_USER_TYPES) > +#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) > +#endif > +#define AGP_REQUIRED_MAJOR 0 > +#define AGP_REQUIRED_MINOR 102 > + > +static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) > +{ > + return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); > +} > + > + > +static int drm_agp_populate(struct drm_ttm_backend *backend, > + unsigned long num_pages, struct page **pages, > + struct page *dummy_read_page) > +{ > + struct drm_agp_ttm_backend *agp_be = > + container_of(backend, struct drm_agp_ttm_backend, backend); > + struct page **cur_page, **last_page = pages + num_pages; > + DRM_AGP_MEM *mem; > + int dummy_page_count = 0; > + > + if (drm_alloc_memctl(num_pages * sizeof(void *))) > + return -1; > + > + DRM_DEBUG("drm_agp_populate_ttm\n"); > + mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY); > + if (!mem) { > + drm_free_memctl(num_pages * sizeof(void *)); > + return -1; > + } > + > + DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count); > + mem->page_count = 0; > + for (cur_page = pages; cur_page < last_page; ++cur_page) { > + struct page *page = *cur_page; > + if (!page) { > + page = dummy_read_page; > + ++dummy_page_count; > + } > + mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page)); > + } > + if (dummy_page_count) > + DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count); > + agp_be->mem = mem; > + return 0; > +} > + > +static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, > + struct drm_bo_mem_reg *bo_mem) > +{ > + struct drm_agp_ttm_backend *agp_be = > + container_of(backend, struct drm_agp_ttm_backend, backend); > + DRM_AGP_MEM *mem = agp_be->mem; > + int ret; > + int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & > DRM_BO_FLAG_CACHED_MAPPED); > + > + DRM_DEBUG("drm_agp_bind_ttm\n"); > + mem->is_flushed = true; > + mem->type = AGP_USER_MEMORY; > + /* CACHED MAPPED implies not snooped memory */ > + if (snooped) > + mem->type = AGP_USER_CACHED_MEMORY; > + > + ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start); > + if (ret) > + DRM_ERROR("AGP Bind memory failed\n"); > + > + DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ? 
> + DRM_BE_FLAG_BOUND_CACHED : 0, > + DRM_BE_FLAG_BOUND_CACHED); > + return ret; > +} > + > +static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) > +{ > + struct drm_agp_ttm_backend *agp_be = > + container_of(backend, struct drm_agp_ttm_backend, backend); > + > + DRM_DEBUG("drm_agp_unbind_ttm\n"); > + if (agp_be->mem->is_bound) > + return drm_agp_unbind_memory(agp_be->mem); > + else > + return 0; > +} > + > +static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) > +{ > + struct drm_agp_ttm_backend *agp_be = > + container_of(backend, struct drm_agp_ttm_backend, backend); > + DRM_AGP_MEM *mem = agp_be->mem; > + > + DRM_DEBUG("drm_agp_clear_ttm\n"); > + if (mem) { > + unsigned long num_pages = mem->page_count; > + backend->func->unbind(backend); > + agp_free_memory(mem); > + drm_free_memctl(num_pages * sizeof(void *)); > + } > + agp_be->mem = NULL; > +} > + > +static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) > +{ > + struct drm_agp_ttm_backend *agp_be; > + > + if (backend) { > + DRM_DEBUG("drm_agp_destroy_ttm\n"); > + agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); > + if (agp_be) { > + if (agp_be->mem) > + backend->func->clear(backend); > + drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM); > + } > + } > +} > + > +static struct drm_ttm_backend_func agp_ttm_backend = { > + .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust, > + .populate = drm_agp_populate, > + .clear = drm_agp_clear_ttm, > + .bind = drm_agp_bind_ttm, > + .unbind = drm_agp_unbind_ttm, > + .destroy = drm_agp_destroy_ttm, > +}; > + > +struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) > +{ > + > + struct drm_agp_ttm_backend *agp_be; > + struct agp_kern_info *info; > + > + if (!dev->agp) { > + DRM_ERROR("AGP is not initialized.\n"); > + return NULL; > + } > + info = &dev->agp->agp_info; > + > + if (info->version.major != AGP_REQUIRED_MAJOR || > + info->version.minor < AGP_REQUIRED_MINOR) { > + DRM_ERROR("Wrong agpgart version %d.%d\n" > + "\tYou need at least version %d.%d.\n", > + info->version.major, > + info->version.minor, > + AGP_REQUIRED_MAJOR, > + AGP_REQUIRED_MINOR); > + return NULL; > + } > + > + > + agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM); > + if (!agp_be) > + return NULL; > + > + agp_be->mem = NULL; > + > + agp_be->bridge = dev->agp->bridge; > + agp_be->populated = false; > + agp_be->backend.func = &agp_ttm_backend; > + agp_be->backend.dev = dev; > + > + return &agp_be->backend; > +} > +EXPORT_SYMBOL(drm_agp_init_ttm); > + > void drm_agp_chipset_flush(struct drm_device *dev) > { > agp_flush_chipset(dev->agp->bridge); > diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c > index a734627..ca7a9ef 100644 > --- a/drivers/gpu/drm/drm_auth.c > +++ b/drivers/gpu/drm/drm_auth.c > @@ -45,14 +45,15 @@ > * the one with matching magic number, while holding the drm_device::struct_mutex > * lock. 
> */ > -static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic) > +static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic) > { > struct drm_file *retval = NULL; > struct drm_magic_entry *pt; > struct drm_hash_item *hash; > + struct drm_device *dev = master->minor->dev; > > mutex_lock(&dev->struct_mutex); > - if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { > + if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { > pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); > retval = pt->priv; > } > @@ -71,11 +72,11 @@ static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic > * associated the magic number hash key in drm_device::magiclist, while holding > * the drm_device::struct_mutex lock. > */ > -static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, > +static int drm_add_magic(struct drm_master *master, struct drm_file *priv, > drm_magic_t magic) > { > struct drm_magic_entry *entry; > - > + struct drm_device *dev = master->minor->dev; > DRM_DEBUG("%d\n", magic); > > entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC); > @@ -83,11 +84,10 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, > return -ENOMEM; > memset(entry, 0, sizeof(*entry)); > entry->priv = priv; > - > entry->hash_item.key = (unsigned long)magic; > mutex_lock(&dev->struct_mutex); > - drm_ht_insert_item(&dev->magiclist, &entry->hash_item); > - list_add_tail(&entry->head, &dev->magicfree); > + drm_ht_insert_item(&master->magiclist, &entry->hash_item); > + list_add_tail(&entry->head, &master->magicfree); > mutex_unlock(&dev->struct_mutex); > > return 0; > @@ -102,20 +102,21 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, > * Searches and unlinks the entry in drm_device::magiclist with the magic > * number hash key, while holding the drm_device::struct_mutex lock. 
> */ > -static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) > +static int drm_remove_magic(struct drm_master *master, drm_magic_t magic) > { > struct drm_magic_entry *pt; > struct drm_hash_item *hash; > + struct drm_device *dev = master->minor->dev; > > DRM_DEBUG("%d\n", magic); > > mutex_lock(&dev->struct_mutex); > - if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { > + if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { > mutex_unlock(&dev->struct_mutex); > return -EINVAL; > } > pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); > - drm_ht_remove_item(&dev->magiclist, hash); > + drm_ht_remove_item(&master->magiclist, hash); > list_del(&pt->head); > mutex_unlock(&dev->struct_mutex); > > @@ -153,9 +154,9 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) > ++sequence; /* reserve 0 */ > auth->magic = sequence++; > spin_unlock(&lock); > - } while (drm_find_file(dev, auth->magic)); > + } while (drm_find_file(file_priv->master, auth->magic)); > file_priv->magic = auth->magic; > - drm_add_magic(dev, file_priv, auth->magic); > + drm_add_magic(file_priv->master, file_priv, auth->magic); > } > > DRM_DEBUG("%u\n", auth->magic); > @@ -181,9 +182,9 @@ int drm_authmagic(struct drm_device *dev, void *data, > struct drm_file *file; > > DRM_DEBUG("%u\n", auth->magic); > - if ((file = drm_find_file(dev, auth->magic))) { > + if ((file = drm_find_file(file_priv->master, auth->magic))) { > file->authenticated = 1; > - drm_remove_magic(dev, auth->magic); > + drm_remove_magic(file_priv->master, auth->magic); > return 0; > } > return -EINVAL; > diff --git a/drivers/gpu/drm/drm_bo.c b/drivers/gpu/drm/drm_bo.c > new file mode 100644 > index 0000000..5cec5a0 > --- /dev/null > +++ b/drivers/gpu/drm/drm_bo.c > @@ -0,0 +1,2116 @@ > +/************************************************************************** > + * > + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA > + * All Rights Reserved. > + * > + * Permission is hereby granted, free of charge, to any person obtaining a > + * copy of this software and associated documentation files (the > + * "Software"), to deal in the Software without restriction, including > + * without limitation the rights to use, copy, modify, merge, publish, > + * distribute, sub license, and/or sell copies of the Software, and to > + * permit persons to whom the Software is furnished to do so, subject to > + * the following conditions: > + * > + * The above copyright notice and this permission notice (including the > + * next paragraph) shall be included in all copies or substantial portions > + * of the Software. > + * > + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR > + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, > + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL > + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, > + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR > + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE > + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
> + * > + **************************************************************************/ > +/* > + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> > + */ > + > +#include "drmP.h" > + > +/* > + * Locking may look a bit complicated but isn't really: > + * > + * The buffer usage atomic_t needs to be protected by dev->struct_mutex > + * when there is a chance that it can be zero before or after the operation. > + * > + * dev->struct_mutex also protects all lists and list heads, > + * Hash tables and hash heads. > + * > + * bo->mutex protects the buffer object itself excluding the usage field. > + * bo->mutex does also protect the buffer list heads, so to manipulate those, > + * we need both the bo->mutex and the dev->struct_mutex. > + * > + * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal > + * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex, > + * the list traversal will, in general, need to be restarted. > + * > + */ > + > +static void drm_bo_destroy_locked(struct drm_buffer_object *bo); > +static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo); > +static void drm_bo_unmap_virtual(struct drm_buffer_object *bo); > + > +static inline uint64_t drm_bo_type_flags(unsigned type) > +{ > + return (1ULL << (24 + type)); > +} > + > +/* > + * bo locked. dev->struct_mutex locked. > + */ > + > +void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo) > +{ > + struct drm_mem_type_manager *man; > + > + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); > + DRM_ASSERT_LOCKED(&bo->mutex); > + > + man = &bo->dev->bm.man[bo->pinned_mem_type]; > + list_add_tail(&bo->pinned_lru, &man->pinned); > +} > + > +void drm_bo_add_to_lru(struct drm_buffer_object *bo) > +{ > + struct drm_mem_type_manager *man; > + > + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); > + > + if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) > + || bo->mem.mem_type != bo->pinned_mem_type) { > + man = &bo->dev->bm.man[bo->mem.mem_type]; > + list_add_tail(&bo->lru, &man->lru); > + } else { > + INIT_LIST_HEAD(&bo->lru); > + } > +} > + > +static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci) > +{ > +#ifdef DRM_ODD_MM_COMPAT > + int ret; > + > + if (!bo->map_list.map) > + return 0; > + > + ret = drm_bo_lock_kmm(bo); > + if (ret) > + return ret; > + drm_bo_unmap_virtual(bo); > + if (old_is_pci) > + drm_bo_finish_unmap(bo); > +#else > + if (!bo->map_list.map) > + return 0; > + > + drm_bo_unmap_virtual(bo); > +#endif > + return 0; > +} > + > +static void drm_bo_vm_post_move(struct drm_buffer_object *bo) > +{ > +#ifdef DRM_ODD_MM_COMPAT > + int ret; > + > + if (!bo->map_list.map) > + return; > + > + ret = drm_bo_remap_bound(bo); > + if (ret) { > + DRM_ERROR("Failed to remap a bound buffer object.\n" > + "\tThis might cause a sigbus later.\n"); > + } > + drm_bo_unlock_kmm(bo); > +#endif > +} > + > +/* > + * Call bo->mutex locked. 
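
On the locking comment above: since bo->mutex comes before dev->struct_mutex in the lock order, any list walk that wants a buffer mutex has to drop struct_mutex first, and the walk restarts because the list may have changed in between. That's roughly the shape drm_bo_mem_force_space() takes further down. An illustrative sketch only, not code from the patch; the visit_one() callback is invented and is assumed to take the entry off the list (as eviction does), otherwise this loop would not terminate:

static void walk_lru_restarting(struct drm_device *dev, struct list_head *lru,
				void (*visit_one)(struct drm_buffer_object *bo))
{
	struct drm_buffer_object *entry;

	mutex_lock(&dev->struct_mutex);
	while (!list_empty(lru)) {
		entry = list_entry(lru->next, struct drm_buffer_object, lru);
		atomic_inc(&entry->usage);		/* pin entry across the unlock */
		mutex_unlock(&dev->struct_mutex);	/* drop before taking bo->mutex */

		mutex_lock(&entry->mutex);
		visit_one(entry);			/* e.g. evict; delists entry */
		mutex_unlock(&entry->mutex);

		drm_bo_usage_deref_unlocked(&entry);
		mutex_lock(&dev->struct_mutex);		/* list may have changed: restart */
	}
	mutex_unlock(&dev->struct_mutex);
}
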
> + */ > + > +int drm_bo_add_ttm(struct drm_buffer_object *bo) > +{ > + struct drm_device *dev = bo->dev; > + int ret = 0; > + uint32_t page_flags = 0; > + > + DRM_ASSERT_LOCKED(&bo->mutex); > + bo->ttm = NULL; > + > + if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE) > + page_flags |= DRM_TTM_PAGE_WRITE; > + > + switch (bo->type) { > + case drm_bo_type_device: > + case drm_bo_type_kernel: > + bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, > + page_flags, dev->bm.dummy_read_page); > + if (!bo->ttm) > + ret = -ENOMEM; > + break; > + case drm_bo_type_user: > + bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, > + page_flags | DRM_TTM_PAGE_USER, > + dev->bm.dummy_read_page); > + if (!bo->ttm) > + ret = -ENOMEM; > + > + ret = drm_ttm_set_user(bo->ttm, current, > + bo->buffer_start, > + bo->num_pages); > + if (ret) > + return ret; > + > + break; > + default: > + DRM_ERROR("Illegal buffer object type\n"); > + ret = -EINVAL; > + break; > + } > + > + return ret; > +} > +EXPORT_SYMBOL(drm_bo_add_ttm); > + > +static int drm_bo_handle_move_mem(struct drm_buffer_object *bo, > + struct drm_bo_mem_reg *mem, > + int evict, int no_wait) > +{ > + struct drm_device *dev = bo->dev; > + struct drm_buffer_manager *bm = &dev->bm; > + int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); > + int new_is_pci = drm_mem_reg_is_pci(dev, mem); > + struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type]; > + struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; > + int ret = 0; > + > + if (old_is_pci || new_is_pci || > + ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED)) > + ret = drm_bo_vm_pre_move(bo, old_is_pci); > + if (ret) > + return ret; > + > + /* > + * Create and bind a ttm if required. > + */ > + > + if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) { > + ret = drm_bo_add_ttm(bo); > + if (ret) > + goto out_err; > + > + if (mem->mem_type != DRM_BO_MEM_LOCAL) { > + ret = drm_ttm_bind(bo->ttm, mem); > + if (ret) > + goto out_err; > + } > + > + if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) { > + > + struct drm_bo_mem_reg *old_mem = &bo->mem; > + uint64_t save_flags = old_mem->flags; > + uint64_t save_proposed_flags = old_mem->proposed_flags; > + > + *old_mem = *mem; > + mem->mm_node = NULL; > + old_mem->proposed_flags = save_proposed_flags; > + DRM_FLAG_MASKED(save_flags, mem->flags, > + DRM_BO_MASK_MEMTYPE); > + goto moved; > + } > + > + } > + > + if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && > + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) > + ret = drm_bo_move_ttm(bo, evict, no_wait, mem); > + else if (dev->driver->bo_driver->move) > + ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); > + else > + ret = drm_bo_move_memcpy(bo, evict, no_wait, mem); > + > + if (ret) > + goto out_err; > + > +moved: > + if (old_is_pci || new_is_pci) > + drm_bo_vm_post_move(bo); > + > + if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { > + ret = > + dev->driver->bo_driver->invalidate_caches(dev, > + bo->mem.flags); > + if (ret) > + DRM_ERROR("Can not flush read caches\n"); > + } > + > + DRM_FLAG_MASKED(bo->priv_flags, > + (evict) ? 
_DRM_BO_FLAG_EVICTED : 0, > + _DRM_BO_FLAG_EVICTED); > + > + if (bo->mem.mm_node) > + bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + > + bm->man[bo->mem.mem_type].gpu_offset; > + > + > + return 0; > + > +out_err: > + if (old_is_pci || new_is_pci) > + drm_bo_vm_post_move(bo); > + > + new_man = &bm->man[bo->mem.mem_type]; > + if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) { > + drm_ttm_unbind(bo->ttm); > + drm_ttm_destroy(bo->ttm); > + bo->ttm = NULL; > + } > + > + return ret; > +} > + > +/* > + * Call bo->mutex locked. > + * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise. > + */ > + > +static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced) > +{ > + struct drm_fence_object *fence = bo->fence; > + > + if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) > + return -EBUSY; > + > + if (fence) { > + if (drm_fence_object_signaled(fence, bo->fence_type)) { > + drm_fence_usage_deref_unlocked(&bo->fence); > + return 0; > + } > + drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE); > + if (drm_fence_object_signaled(fence, bo->fence_type)) { > + drm_fence_usage_deref_unlocked(&bo->fence); > + return 0; > + } > + return -EBUSY; > + } > + return 0; > +} > + > +static int drm_bo_check_unfenced(struct drm_buffer_object *bo) > +{ > + int ret; > + > + mutex_lock(&bo->mutex); > + ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); > + mutex_unlock(&bo->mutex); > + return ret; > +} > + > + > +/* > + * Call bo->mutex locked. > + * Wait until the buffer is idle. > + */ > + > +int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible, > + int no_wait, int check_unfenced) > +{ > + int ret; > + > + DRM_ASSERT_LOCKED(&bo->mutex); > + while(unlikely(drm_bo_busy(bo, check_unfenced))) { > + if (no_wait) > + return -EBUSY; > + > + if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) { > + mutex_unlock(&bo->mutex); > + wait_event(bo->event_queue, !drm_bo_check_unfenced(bo)); > + mutex_lock(&bo->mutex); > + bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; > + } > + > + if (bo->fence) { > + struct drm_fence_object *fence; > + uint32_t fence_type = bo->fence_type; > + > + drm_fence_reference_unlocked(&fence, bo->fence); > + mutex_unlock(&bo->mutex); > + > + ret = drm_fence_object_wait(fence, lazy, !interruptible, > + fence_type); > + > + drm_fence_usage_deref_unlocked(&fence); > + mutex_lock(&bo->mutex); > + bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED; > + if (ret) > + return ret; > + } > + > + } > + return 0; > +} > +EXPORT_SYMBOL(drm_bo_wait); > + > +static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors) > +{ > + struct drm_device *dev = bo->dev; > + struct drm_buffer_manager *bm = &dev->bm; > + > + if (bo->fence) { > + if (bm->nice_mode) { > + unsigned long _end = jiffies + 3 * DRM_HZ; > + int ret; > + do { > + ret = drm_bo_wait(bo, 0, 0, 0, 0); > + if (ret && allow_errors) > + return ret; > + > + } while (ret && !time_after_eq(jiffies, _end)); > + > + if (bo->fence) { > + bm->nice_mode = 0; > + DRM_ERROR("Detected GPU lockup or " > + "fence driver was taken down. " > + "Evicting buffer.\n"); > + } > + } > + if (bo->fence) > + drm_fence_usage_deref_unlocked(&bo->fence); > + } > + return 0; > +} > + > +/* > + * Call dev->struct_mutex locked. > + * Attempts to remove all private references to a buffer by expiring its > + * fence object and removing from lru lists and memory managers. 
> + */ > + > +static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all) > +{ > + struct drm_device *dev = bo->dev; > + struct drm_buffer_manager *bm = &dev->bm; > + > + DRM_ASSERT_LOCKED(&dev->struct_mutex); > + > + atomic_inc(&bo->usage); > + mutex_unlock(&dev->struct_mutex); > + mutex_lock(&bo->mutex); > + > + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); > + > + if (bo->fence && drm_fence_object_signaled(bo->fence, > + bo->fence_type)) > + drm_fence_usage_deref_unlocked(&bo->fence); > + > + if (bo->fence && remove_all) > + (void)drm_bo_expire_fence(bo, 0); > + > + mutex_lock(&dev->struct_mutex); > + > + if (!atomic_dec_and_test(&bo->usage)) > + goto out; > + > + if (!bo->fence) { > + list_del_init(&bo->lru); > + if (bo->mem.mm_node) { > + drm_mm_put_block(bo->mem.mm_node); > + if (bo->pinned_node == bo->mem.mm_node) > + bo->pinned_node = NULL; > + bo->mem.mm_node = NULL; > + } > + list_del_init(&bo->pinned_lru); > + if (bo->pinned_node) { > + drm_mm_put_block(bo->pinned_node); > + bo->pinned_node = NULL; > + } > + list_del_init(&bo->ddestroy); > + mutex_unlock(&bo->mutex); > + drm_bo_destroy_locked(bo); > + return; > + } > + > + if (list_empty(&bo->ddestroy)) { > + drm_fence_object_flush(bo->fence, bo->fence_type); > + list_add_tail(&bo->ddestroy, &bm->ddestroy); > + schedule_delayed_work(&bm->wq, > + ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); > + } > + > +out: > + mutex_unlock(&bo->mutex); > + return; > +} > + > +/* > + * Verify that refcount is 0 and that there are no internal references > + * to the buffer object. Then destroy it. > + */ > + > +static void drm_bo_destroy_locked(struct drm_buffer_object *bo) > +{ > + struct drm_device *dev = bo->dev; > + struct drm_buffer_manager *bm = &dev->bm; > + > + DRM_ASSERT_LOCKED(&dev->struct_mutex); > + > + DRM_DEBUG("freeing %p\n", bo); > + if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && > + list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && > + list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { > + if (bo->fence != NULL) { > + DRM_ERROR("Fence was non-zero.\n"); > + drm_bo_cleanup_refs(bo, 0); > + return; > + } > + > +#ifdef DRM_ODD_MM_COMPAT > + BUG_ON(!list_empty(&bo->vma_list)); > + BUG_ON(!list_empty(&bo->p_mm_list)); > +#endif > + > + if (bo->ttm) { > + drm_ttm_unbind(bo->ttm); > + drm_ttm_destroy(bo->ttm); > + bo->ttm = NULL; > + } > + > + atomic_dec(&bm->count); > + > + drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); > + > + return; > + } > + > + /* > + * Some stuff is still trying to reference the buffer object. > + * Get rid of those references. > + */ > + > + drm_bo_cleanup_refs(bo, 0); > + > + return; > +} > + > +/* > + * Call dev->struct_mutex locked. 
> + */ > + > +static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all) > +{ > + struct drm_buffer_manager *bm = &dev->bm; > + > + struct drm_buffer_object *entry, *nentry; > + struct list_head *list, *next; > + > + list_for_each_safe(list, next, &bm->ddestroy) { > + entry = list_entry(list, struct drm_buffer_object, ddestroy); > + > + nentry = NULL; > + DRM_DEBUG("bo is %p, %d\n", entry, entry->num_pages); > + if (next != &bm->ddestroy) { > + nentry = list_entry(next, struct drm_buffer_object, > + ddestroy); > + atomic_inc(&nentry->usage); > + } > + > + drm_bo_cleanup_refs(entry, remove_all); > + > + if (nentry) > + atomic_dec(&nentry->usage); > + } > +} > + > +static void drm_bo_delayed_workqueue(struct work_struct *work) > +{ > + struct drm_buffer_manager *bm = > + container_of(work, struct drm_buffer_manager, wq.work); > + struct drm_device *dev = container_of(bm, struct drm_device, bm); > + > + DRM_DEBUG("Delayed delete Worker\n"); > + > + mutex_lock(&dev->struct_mutex); > + if (!bm->initialized) { > + mutex_unlock(&dev->struct_mutex); > + return; > + } > + drm_bo_delayed_delete(dev, 0); > + if (bm->initialized && !list_empty(&bm->ddestroy)) { > + schedule_delayed_work(&bm->wq, > + ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); > + } > + mutex_unlock(&dev->struct_mutex); > +} > + > +void drm_bo_usage_deref_locked(struct drm_buffer_object **bo) > +{ > + struct drm_buffer_object *tmp_bo = *bo; > + bo = NULL; > + > + DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex); > + > + if (atomic_dec_and_test(&tmp_bo->usage)) > + drm_bo_destroy_locked(tmp_bo); > +} > +EXPORT_SYMBOL(drm_bo_usage_deref_locked); > + > +void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo) > +{ > + struct drm_buffer_object *tmp_bo = *bo; > + struct drm_device *dev = tmp_bo->dev; > + > + *bo = NULL; > + if (atomic_dec_and_test(&tmp_bo->usage)) { > + mutex_lock(&dev->struct_mutex); > + if (atomic_read(&tmp_bo->usage) == 0) > + drm_bo_destroy_locked(tmp_bo); > + mutex_unlock(&dev->struct_mutex); > + } > +} > +EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); > + > +void drm_putback_buffer_objects(struct drm_device *dev) > +{ > + struct drm_buffer_manager *bm = &dev->bm; > + struct list_head *list = &bm->unfenced; > + struct drm_buffer_object *entry, *next; > + > + mutex_lock(&dev->struct_mutex); > + list_for_each_entry_safe(entry, next, list, lru) { > + atomic_inc(&entry->usage); > + mutex_unlock(&dev->struct_mutex); > + > + mutex_lock(&entry->mutex); > + BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); > + mutex_lock(&dev->struct_mutex); > + > + list_del_init(&entry->lru); > + DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); > + wake_up_all(&entry->event_queue); > + > + /* > + * FIXME: Might want to put back on head of list > + * instead of tail here. > + */ > + > + drm_bo_add_to_lru(entry); > + mutex_unlock(&entry->mutex); > + drm_bo_usage_deref_locked(&entry); > + } > + mutex_unlock(&dev->struct_mutex); > +} > +EXPORT_SYMBOL(drm_putback_buffer_objects); > + > +/* > + * Note. The caller has to register (if applicable) > + * and deregister fence object usage. 
> + */ > + > +int drm_fence_buffer_objects(struct drm_device *dev, > + struct list_head *list, > + uint32_t fence_flags, > + struct drm_fence_object *fence, > + struct drm_fence_object **used_fence) > +{ > + struct drm_buffer_manager *bm = &dev->bm; > + struct drm_buffer_object *entry; > + uint32_t fence_type = 0; > + uint32_t fence_class = ~0; > + int count = 0; > + int ret = 0; > + struct list_head *l; > + > + mutex_lock(&dev->struct_mutex); > + > + if (!list) > + list = &bm->unfenced; > + > + if (fence) > + fence_class = fence->fence_class; > + > + list_for_each_entry(entry, list, lru) { > + BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); > + fence_type |= entry->new_fence_type; > + if (fence_class == ~0) > + fence_class = entry->new_fence_class; > + else if (entry->new_fence_class != fence_class) { > + DRM_ERROR("Unmatching fence classes on unfenced list: " > + "%d and %d.\n", > + fence_class, > + entry->new_fence_class); > + ret = -EINVAL; > + goto out; > + } > + count++; > + } > + > + if (!count) { > + ret = -EINVAL; > + goto out; > + } > + > + if (fence) { > + if ((fence_type & fence->type) != fence_type || > + (fence->fence_class != fence_class)) { > + DRM_ERROR("Given fence doesn't match buffers " > + "on unfenced list.\n"); > + ret = -EINVAL; > + goto out; > + } > + } else { > + mutex_unlock(&dev->struct_mutex); > + ret = drm_fence_object_create(dev, fence_class, fence_type, > + fence_flags | DRM_FENCE_FLAG_EMIT, > + &fence); > + mutex_lock(&dev->struct_mutex); > + if (ret) > + goto out; > + } > + > + count = 0; > + l = list->next; > + while (l != list) { > + prefetch(l->next); > + entry = list_entry(l, struct drm_buffer_object, lru); > + atomic_inc(&entry->usage); > + mutex_unlock(&dev->struct_mutex); > + mutex_lock(&entry->mutex); > + mutex_lock(&dev->struct_mutex); > + list_del_init(l); > + if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { > + count++; > + if (entry->fence) > + drm_fence_usage_deref_locked(&entry->fence); > + entry->fence = drm_fence_reference_locked(fence); > + entry->fence_class = entry->new_fence_class; > + entry->fence_type = entry->new_fence_type; > + DRM_FLAG_MASKED(entry->priv_flags, 0, > + _DRM_BO_FLAG_UNFENCED); > + wake_up_all(&entry->event_queue); > + drm_bo_add_to_lru(entry); > + } > + mutex_unlock(&entry->mutex); > + drm_bo_usage_deref_locked(&entry); > + l = list->next; > + } > + DRM_DEBUG("Fenced %d buffers\n", count); > +out: > + mutex_unlock(&dev->struct_mutex); > + *used_fence = fence; > + return ret; > +} > +EXPORT_SYMBOL(drm_fence_buffer_objects); > + > +/* > + * bo->mutex locked > + */ > + > +static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type, > + int no_wait) > +{ > + int ret = 0; > + struct drm_device *dev = bo->dev; > + struct drm_bo_mem_reg evict_mem; > + > + /* > + * Someone might have modified the buffer before we took the > + * buffer mutex. 
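
A note on drm_fence_buffer_objects() above, since the "register and deregister fence object usage" remark is easy to miss: on success the caller owns a reference on *used_fence and must drop it. A hypothetical driver-side caller (illustrative only, driver_fence_outstanding() is an invented name) that fences everything on the unfenced list (list == NULL) and lets the core create and emit the fence (fence == NULL) might look like:

static int driver_fence_outstanding(struct drm_device *dev)
{
	struct drm_fence_object *used_fence = NULL;
	int ret;

	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &used_fence);
	if (ret)
		return ret;

	/* ... hand used_fence to whoever needs to wait on it ... */

	drm_fence_usage_deref_unlocked(&used_fence);	/* deregister our usage */
	return 0;
}
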
> + */ > + > + do { > + bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED; > + > + if (unlikely(bo->mem.flags & > + (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) > + goto out_unlock; > + if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) > + goto out_unlock; > + if (unlikely(bo->mem.mem_type != mem_type)) > + goto out_unlock; > + ret = drm_bo_wait(bo, 0, 1, no_wait, 0); > + if (ret) > + goto out_unlock; > + > + } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); > + > + evict_mem = bo->mem; > + evict_mem.mm_node = NULL; > + > + evict_mem = bo->mem; > + evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo); > + > + mutex_lock(&dev->struct_mutex); > + list_del_init(&bo->lru); > + mutex_unlock(&dev->struct_mutex); > + > + ret = drm_bo_mem_space(bo, &evict_mem, no_wait); > + > + if (ret) { > + if (ret != -EAGAIN) > + DRM_ERROR("Failed to find memory space for " > + "buffer 0x%p eviction.\n", bo); > + goto out; > + } > + > + ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); > + > + if (ret) { > + if (ret != -EAGAIN) > + DRM_ERROR("Buffer eviction failed\n"); > + goto out; > + } > + > + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, > + _DRM_BO_FLAG_EVICTED); > + > +out: > + mutex_lock(&dev->struct_mutex); > + if (evict_mem.mm_node) { > + if (evict_mem.mm_node != bo->pinned_node) > + drm_mm_put_block(evict_mem.mm_node); > + evict_mem.mm_node = NULL; > + } > + drm_bo_add_to_lru(bo); > + BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED); > +out_unlock: > + mutex_unlock(&dev->struct_mutex); > + > + return ret; > +} > + > +/** > + * Repeatedly evict memory from the LRU for @mem_type until we create enough > + * space, or we've evicted everything and there isn't enough space. > + */ > +static int drm_bo_mem_force_space(struct drm_device *dev, > + struct drm_bo_mem_reg *mem, > + uint32_t mem_type, int no_wait) > +{ > + struct drm_mm_node *node; > + struct drm_buffer_manager *bm = &dev->bm; > + struct drm_buffer_object *entry; > + struct drm_mem_type_manager *man = &bm->man[mem_type]; > + struct list_head *lru; > + unsigned long num_pages = mem->num_pages; > + int ret; > + > + mutex_lock(&dev->struct_mutex); > + do { > + node = drm_mm_search_free(&man->manager, num_pages, > + mem->page_alignment, 1); > + if (node) > + break; > + > + lru = &man->lru; > + if (lru->next == lru) > + break; > + > + entry = list_entry(lru->next, struct drm_buffer_object, lru); > + atomic_inc(&entry->usage); > + mutex_unlock(&dev->struct_mutex); > + mutex_lock(&entry->mutex); > + ret = drm_bo_evict(entry, mem_type, no_wait); > + mutex_unlock(&entry->mutex); > + drm_bo_usage_deref_unlocked(&entry); > + if (ret) > + return ret; > + mutex_lock(&dev->struct_mutex); > + } while (1); > + > + if (!node) { > + mutex_unlock(&dev->struct_mutex); > + return -ENOMEM; > + } > + > + node = drm_mm_get_block(node, num_pages, mem->page_alignment); > + if (unlikely(!node)) { > + mutex_unlock(&dev->struct_mutex); > + return -ENOMEM; > + } > + > + mutex_unlock(&dev->struct_mutex); > + mem->mm_node = node; > + mem->mem_type = mem_type; > + return 0; > +} > + > +static int drm_bo_mt_compatible(struct drm_mem_type_manager *man, > + int disallow_fixed, > + uint32_t mem_type, > + uint64_t mask, uint32_t *res_mask) > +{ > + uint64_t cur_flags = drm_bo_type_flags(mem_type); > + uint64_t flag_diff; > + > + if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed) > + return 0; > + if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) > + cur_flags |= DRM_BO_FLAG_CACHED; > + if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE) > + cur_flags |= 
DRM_BO_FLAG_MAPPABLE; > + if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) > + DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); > + > + if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) > + return 0; > + > + if (mem_type == DRM_BO_MEM_LOCAL) { > + *res_mask = cur_flags; > + return 1; > + } > + > + flag_diff = (mask ^ cur_flags); > + if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED) > + cur_flags |= DRM_BO_FLAG_CACHED_MAPPED; > + > + if ((flag_diff & DRM_BO_FLAG_CACHED) && > + (!(mask & DRM_BO_FLAG_CACHED) || > + (mask & DRM_BO_FLAG_FORCE_CACHING))) > + return 0; > + > + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && > + ((mask & DRM_BO_FLAG_MAPPABLE) || > + (mask & DRM_BO_FLAG_FORCE_MAPPABLE))) > + return 0; > + > + *res_mask = cur_flags; > + return 1; > +} > + > +/** > + * Creates space for memory region @mem according to its type. > + * > + * This function first searches for free space in compatible memory types in > + * the priority order defined by the driver. If free space isn't found, then > + * drm_bo_mem_force_space is attempted in priority order to evict and find > + * space. > + */ > +int drm_bo_mem_space(struct drm_buffer_object *bo, > + struct drm_bo_mem_reg *mem, int no_wait) > +{ > + struct drm_device *dev = bo->dev; > + struct drm_buffer_manager *bm = &dev->bm; > + struct drm_mem_type_manager *man; > + > + uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; > + const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; > + uint32_t i; > + uint32_t mem_type = DRM_BO_MEM_LOCAL; > + uint32_t cur_flags; > + int type_found = 0; > + int type_ok = 0; > + int has_eagain = 0; > + struct drm_mm_node *node = NULL; > + int ret; > + > + mem->mm_node = NULL; > + for (i = 0; i < num_prios; ++i) { > + mem_type = prios[i]; > + man = &bm->man[mem_type]; > + > + type_ok = drm_bo_mt_compatible(man, > + bo->type == drm_bo_type_user, > + mem_type, mem->proposed_flags, > + &cur_flags); > + > + if (!type_ok) > + continue; > + > + if (mem_type == DRM_BO_MEM_LOCAL) > + break; > + > + if ((mem_type == bo->pinned_mem_type) && > + (bo->pinned_node != NULL)) { > + node = bo->pinned_node; > + break; > + } > + > + mutex_lock(&dev->struct_mutex); > + if (man->has_type && man->use_type) { > + type_found = 1; > + node = drm_mm_search_free(&man->manager, mem->num_pages, > + mem->page_alignment, 1); > + if (node) > + node = drm_mm_get_block(node, mem->num_pages, > + mem->page_alignment); > + } > + mutex_unlock(&dev->struct_mutex); > + if (node) > + break; > + } > + > + if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { > + mem->mm_node = node; > + mem->mem_type = mem_type; > + mem->flags = cur_flags; > + return 0; > + } > + > + if (!typ... [truncated message content] |
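
Back near the top of the patch: the PCIE entry layout in gart_insert_page_into_table() packs a 40-bit, 4 KiB-aligned bus address into a 32-bit GART entry as (addr >> 8), with address bits 39:32 landing in entry bits 31:24 and entry bits 3:2 doubling as the read/write enables. A small standalone userspace demonstration of the round trip (my own illustration, not kernel code from the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ATI_PCIGART_PAGE_SIZE 4096
#define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE - 1))
#define ATI_PCIE_WRITE 0x4
#define ATI_PCIE_READ  0x8

static uint32_t pcie_encode(uint64_t addr)
{
	uint32_t e = ((uint32_t)addr & ATI_PCIGART_PAGE_MASK) >> 8;

	e |= ((uint32_t)(addr >> 32) & 0xff) << 24;	/* address bits 39:32 */
	return e | ATI_PCIE_READ | ATI_PCIE_WRITE;
}

static uint64_t pcie_decode(uint32_t e)
{
	return (uint64_t)(e & ~0xcu) << 8;	/* mask the R/W bits, undo the shift */
}

int main(void)
{
	uint64_t addr = 0x1234567000ULL;	/* 4 KiB-aligned, above 4 GiB */

	printf("entry = 0x%08x\n", pcie_encode(addr));
	assert(pcie_decode(pcie_encode(addr)) == addr);
	return 0;
}
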