Date: Sun, 26 Oct 2014 16:24:42 +0100
From: Tijl Coosemans <tijl@FreeBSD.org>
To: x11@FreeBSD.org, kib@FreeBSD.org, dumbbell@FreeBSD.org
Subject: [rfc] Radeon AGP support patches
Message-ID: <20141026162442.1330d4c3@kalimero.tijl.coosemans.org>
[-- Attachment #1 --]
Hi,
I worked on AGP support for Radeon cards this week. Please take a look
at the attached patches.
Patch 1:
Adds support for AGP_USER_TYPES to sys/dev/agp. For normal memory types
a vm_object is allocated; for user types only a vm_page array is
allocated, and it is then up to the caller (e.g. the TTM code) to manage
this array. Arbitrary pages can be mapped into the GTT this way.
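For reference, this is roughly how a caller would use the new user-type
path (untested sketch; map_pages_into_gtt and its arguments are made up
for illustration, the agp_* calls are the ones the TTM backend patch
uses):

/*
 * Map an array of wired pages into the GTT via AGP_USER_MEMORY.
 * The caller keeps ownership of the vm_page array referenced by
 * mem->am_pages; agp only installs/uninstalls the GATT entries.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <dev/agp/agpvar.h>
#include <dev/agp/agppriv.h>

static int
map_pages_into_gtt(device_t bridge, vm_page_t *pages, int npages,
    vm_offset_t gtt_offset)
{
	struct agp_memory *mem;
	int error, i;

	/* User types allocate only the am_pages array, no vm_object. */
	mem = agp_alloc_memory(bridge, AGP_USER_MEMORY,
	    (vm_size_t)npages * PAGE_SIZE);
	if (mem == NULL)
		return (ENOMEM);

	for (i = 0; i < npages; i++)
		mem->am_pages[i] = pages[i];

	/* Install the pages in the GATT at the given aperture offset. */
	error = agp_bind_memory(bridge, mem, gtt_offset);
	if (error != 0)
		agp_free_memory(bridge, mem);
	return (error);
}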
Patch 2:
This isn't needed for AGP support, but it's something I ran into while
chasing a bug. The handle field in our drm_local_map_t is different
from the one in Linux. On Linux this field contains a unique ID for
user-space maps and a virtual address for kernel-space maps. On FreeBSD
this field always contains a unique ID, and the virtual address of
kernel-space maps is stored in a separate field named virtual (see the
old DRM code in sys/dev/drm). I think most of the changes in the patch
are in dead code (unless the driver still supports running old DRMv1 X
servers).
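To make the difference concrete, these are the two fields in question
(layout sketch only, all other members omitted; see the drm2 headers
for the real definition):

typedef struct drm_local_map {
	/* ... other members unchanged ... */
	void	*handle;	/* FreeBSD: always the unique map ID */
	void	*virtual;	/* FreeBSD: KVA of kernel-space maps */
} drm_local_map_t;

This is why the patch switches kernel-space accesses from map->handle
to map->virtual.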
Patch 3:
Enable AGP support in sys/dev/drm2. In PCI mode the GTT lives on the
graphics card, so when the card accesses system memory it does its own
virtual address translation and only physical addresses appear on the
system bus; the CPU can access the same addresses with its own VM
system like it always does. In AGP mode the translation is done by the
AGP chipset, so fictitious addresses appear on the system bus, and for
CPU cache management to work correctly the CPU needs to use these same
fictitious addresses instead of the real physical addresses directly.
The patch marks the AGP aperture range fictitious in radeon_device.c,
where the VRAM aperture is also marked fictitious, so that
PHYS_TO_VM_PAGE in ttm_bo_vm_fault works for addresses in this range.
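In code terms the registration boils down to something like this
(untested sketch; the helper name is made up and the arguments mirror
rdev->mc in the patch):

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

/*
 * Register the AGP aperture as a fictitious physical range so that
 * PHYS_TO_VM_PAGE() in ttm_bo_vm_fault() can resolve the (fictitious)
 * bus addresses the AGP chipset puts on the system bus.
 */
static int
radeon_register_agp_aperture(vm_paddr_t agp_base, vm_size_t gtt_size)
{
	int error;

	error = vm_phys_fictitious_reg_range(agp_base,
	    agp_base + gtt_size, VM_MEMATTR_WRITE_COMBINING);
	if (error != 0)
		return (error);

	/* PHYS_TO_VM_PAGE(agp_base + off) now returns a fake page. */
	return (0);
}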
The rest of the patch is mostly porting to our agp_* API. It also
fixes two memory leaks in ttm_agp_backend.c. One is a missing free in
ttm_agp_tt_create. The other is that ttm_agp_bind allocates an
agp_memory struct but ttm_agp_unbind does not free it, so calling
ttm_agp_bind a second time loses the reference to the struct. The
patch changes ttm_agp_bind so the allocation only happens on the first
call; the struct is released in ttm_agp_destroy.
The changes to ttm_page_alloc.c reduce the diff with Linux.
Patch 4:
Something else I ran into: in ttm_bo_kmap_ttm, when mapping a TTM
buffer object into kernel space that may be cached (TTM_PL_FLAG_CACHED),
use normal WB caching like Linux does instead of WC. I think the WC was
a copy-paste error from the similar-looking code in ttm_bo_ioremap,
which is defined right above this function.
[-- Attachment #2 --]
Index: sys/dev/agp/agp.c
===================================================================
--- sys/dev/agp/agp.c (revision 273255)
+++ sys/dev/agp/agp.c (working copy)
@@ -483,29 +483,36 @@ agp_generic_alloc_memory(device_t dev, i
struct agp_memory *mem;
if ((size & (AGP_PAGE_SIZE - 1)) != 0)
- return 0;
+ return (NULL);
- if (sc->as_allocated + size > sc->as_maxmem)
- return 0;
+ if (size > sc->as_maxmem - sc->as_allocated)
+ return (NULL);
- if (type != 0) {
+ if (type >= AGP_USER_TYPES) {
+ mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
+ mem->am_pages = malloc(atop(round_page(size)) *
+ sizeof(*mem->am_pages), M_AGP,
+ M_WAITOK | M_ZERO);
+ } else if (type == AGP_NORMAL_MEMORY) {
+ mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
+ mem->am_obj = vm_object_allocate(OBJT_DEFAULT,
+ atop(round_page(size)));
+ } else {
printf("agp_generic_alloc_memory: unsupported type %d\n",
type);
- return 0;
+ return (NULL);
}
- mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
mem->am_id = sc->as_nextid++;
mem->am_size = size;
- mem->am_type = 0;
- mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
+ mem->am_type = type;
mem->am_physical = 0;
mem->am_offset = 0;
mem->am_is_bound = 0;
TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
sc->as_allocated += size;
- return mem;
+ return (mem);
}
int
@@ -518,7 +525,10 @@ agp_generic_free_memory(device_t dev, st
sc->as_allocated -= mem->am_size;
TAILQ_REMOVE(&sc->as_memory, mem, am_link);
- vm_object_deallocate(mem->am_obj);
+ if (mem->am_type >= AGP_USER_TYPES)
+ free(mem->am_pages, M_AGP);
+ else
+ vm_object_deallocate(mem->am_obj);
free(mem, M_AGP);
return 0;
}
@@ -537,7 +547,46 @@ agp_generic_bind_memory(device_t dev, st
offset + mem->am_size > AGP_GET_APERTURE(dev)) {
device_printf(dev, "binding memory at bad offset %#x\n",
(int)offset);
- return EINVAL;
+ return (EINVAL);
+ }
+
+ if (mem->am_type >= AGP_USER_TYPES) {
+ mtx_lock(&sc->as_lock);
+ if (mem->am_is_bound) {
+ device_printf(dev, "memory already bound\n");
+ mtx_unlock(&sc->as_lock);
+ return (EINVAL);
+ }
+ for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
+ m = mem->am_pages[i >> PAGE_SHIFT];
+
+ /*
+ * Install entries in the GATT, making sure that if
+ * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
+ * aligned to PAGE_SIZE, we don't modify too many GATT
+ * entries.
+ */
+ for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
+ j += AGP_PAGE_SIZE) {
+ vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
+ AGP_DPF("binding offset %#jx to pa %#jx\n",
+ (uintmax_t)offset + i + j,
+ (uintmax_t)pa);
+ error = AGP_BIND_PAGE(dev, offset + i + j, pa);
+ if (error) {
+ /*
+ * Bail out. Reverse all the mappings.
+ */
+ for (k = 0; k < i + j;
+ k += AGP_PAGE_SIZE)
+ AGP_UNBIND_PAGE(dev,
+ offset + k);
+ mtx_unlock(&sc->as_lock);
+ return (error);
+ }
+ }
+ }
+ goto done;
}
/*
@@ -604,6 +653,7 @@ agp_generic_bind_memory(device_t dev, st
}
VM_OBJECT_WUNLOCK(mem->am_obj);
+done:
/*
* Flush the cpu cache since we are providing a new mapping
* for these pages.
@@ -620,7 +670,7 @@ agp_generic_bind_memory(device_t dev, st
mtx_unlock(&sc->as_lock);
- return 0;
+ return (0);
bad:
mtx_unlock(&sc->as_lock);
VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
@@ -634,7 +684,7 @@ bad:
}
VM_OBJECT_WUNLOCK(mem->am_obj);
- return error;
+ return (error);
}
int
@@ -659,15 +709,16 @@ agp_generic_unbind_memory(device_t dev,
*/
for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
AGP_UNBIND_PAGE(dev, mem->am_offset + i);
- VM_OBJECT_WLOCK(mem->am_obj);
- for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
- m = vm_page_lookup(mem->am_obj, atop(i));
- vm_page_lock(m);
- vm_page_unwire(m, PQ_INACTIVE);
- vm_page_unlock(m);
+ if (mem->am_type < AGP_USER_TYPES) {
+ VM_OBJECT_WLOCK(mem->am_obj);
+ for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
+ m = vm_page_lookup(mem->am_obj, atop(i));
+ vm_page_lock(m);
+ vm_page_unwire(m, PQ_INACTIVE);
+ vm_page_unlock(m);
+ }
+ VM_OBJECT_WUNLOCK(mem->am_obj);
}
- VM_OBJECT_WUNLOCK(mem->am_obj);
-
agp_flush_cache();
AGP_FLUSH_TLB(dev);
@@ -756,6 +807,9 @@ agp_allocate_user(device_t dev, agp_allo
{
struct agp_memory *mem;
+ if (alloc->type >= AGP_USER_TYPES)
+ return EINVAL;
+
mem = AGP_ALLOC_MEMORY(dev,
alloc->type,
alloc->pg_count << AGP_PAGE_SHIFT);
Index: sys/dev/agp/agppriv.h
===================================================================
--- sys/dev/agp/agppriv.h (revision 273255)
+++ sys/dev/agp/agppriv.h (working copy)
@@ -55,7 +55,10 @@ struct agp_memory {
int am_id; /* unique id for block */
vm_size_t am_size; /* number of bytes allocated */
int am_type; /* chipset specific type */
- struct vm_object *am_obj; /* VM object owning pages */
+ union {
+ struct vm_object *am_obj; /* VM object owning pages */
+ struct vm_page **am_pages; /* user types pages */
+ };
vm_offset_t am_physical; /* bogus hack for i810 */
vm_offset_t am_offset; /* page offset if bound */
int am_is_bound; /* non-zero if bound */
[-- Attachment #3 --]
Index: sys/dev/drm2/ati_pcigart.c
===================================================================
--- sys/dev/drm2/ati_pcigart.c (revision 273255)
+++ sys/dev/drm2/ati_pcigart.c (working copy)
@@ -152,7 +152,7 @@ int drm_ati_pcigart_init(struct drm_devi
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
memset(pci_gart, 0, max_ati_pages * sizeof(u32));
} else {
- memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
+ memset_io((void __iomem *)map->virtual, 0, max_ati_pages * sizeof(u32));
}
gart_idx = 0;
Index: sys/dev/drm2/radeon/r600_blit.c
===================================================================
--- sys/dev/drm2/radeon/r600_blit.c (revision 273255)
+++ sys/dev/drm2/radeon/r600_blit.c (working copy)
@@ -136,8 +136,8 @@ set_shaders(struct drm_device *dev)
DRM_DEBUG("\n");
/* load shaders */
- vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
- ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
+ vs = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset);
+ ps = (u32 *) ((char *)dev->agp_buffer_map->virtual + dev_priv->blit_vb->offset + 256);
for (i = 0; i < r6xx_vs_size; i++)
vs[i] = cpu_to_le32(r6xx_vs[i]);
@@ -545,7 +545,7 @@ static void r600_nomm_put_vb(struct drm_
static void *r600_nomm_get_vb_ptr(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- return (((char *)dev->agp_buffer_map->handle +
+ return (((char *)dev->agp_buffer_map->virtual +
dev_priv->blit_vb->offset + dev_priv->blit_vb->used));
}
Index: sys/dev/drm2/radeon/r600_cp.c
===================================================================
--- sys/dev/drm2/radeon/r600_cp.c (revision 273255)
+++ sys/dev/drm2/radeon/r600_cp.c (working copy)
@@ -176,7 +176,7 @@ int r600_page_table_init(struct drm_devi
pages = (entry->pages <= max_real_pages) ?
entry->pages : max_real_pages;
- memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u64));
+ memset_io((void __iomem *)map->virtual, 0, max_ati_pages * sizeof(u64));
gart_idx = 0;
for (i = 0; i < pages; i++) {
@@ -2074,14 +2074,13 @@ int r600_do_init_cp(struct drm_device *d
}
#if __OS_HAS_AGP
- /* XXX */
if (dev_priv->flags & RADEON_IS_AGP) {
drm_core_ioremap_wc(dev_priv->cp_ring, dev);
drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
drm_core_ioremap_wc(dev->agp_buffer_map, dev);
- if (!dev_priv->cp_ring->handle ||
- !dev_priv->ring_rptr->handle ||
- !dev->agp_buffer_map->handle) {
+ if (!dev_priv->cp_ring->virtual ||
+ !dev_priv->ring_rptr->virtual ||
+ !dev->agp_buffer_map->virtual) {
DRM_ERROR("could not find ioremap agp regions!\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
@@ -2089,18 +2088,18 @@ int r600_do_init_cp(struct drm_device *d
} else
#endif
{
- dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset;
- dev_priv->ring_rptr->handle =
+ dev_priv->cp_ring->virtual = (void *)(unsigned long)dev_priv->cp_ring->offset;
+ dev_priv->ring_rptr->virtual =
(void *)(unsigned long)dev_priv->ring_rptr->offset;
- dev->agp_buffer_map->handle =
+ dev->agp_buffer_map->virtual =
(void *)(unsigned long)dev->agp_buffer_map->offset;
- DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
- dev_priv->cp_ring->handle);
- DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
- dev_priv->ring_rptr->handle);
- DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
- dev->agp_buffer_map->handle);
+ DRM_DEBUG("dev_priv->cp_ring->virtual %p\n",
+ dev_priv->cp_ring->virtual);
+ DRM_DEBUG("dev_priv->ring_rptr->virtual %p\n",
+ dev_priv->ring_rptr->virtual);
+ DRM_DEBUG("dev->agp_buffer_map->virtual %p\n",
+ dev->agp_buffer_map->virtual);
}
dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 24;
@@ -2180,8 +2179,8 @@ int r600_do_init_cp(struct drm_device *d
DRM_DEBUG("dev_priv->gart_buffers_offset 0x%08lx\n",
dev_priv->gart_buffers_offset);
- dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
- dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ dev_priv->ring.start = (u32 *) dev_priv->cp_ring->virtual;
+ dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->virtual
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
@@ -2220,14 +2219,14 @@ int r600_do_init_cp(struct drm_device *d
dev_priv->gart_info.table_size;
drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
- if (!dev_priv->gart_info.mapping.handle) {
+ if (!dev_priv->gart_info.mapping.virtual) {
DRM_ERROR("ioremap failed.\n");
r600_do_cleanup_cp(dev);
return -EINVAL;
}
dev_priv->gart_info.addr =
- dev_priv->gart_info.mapping.handle;
+ dev_priv->gart_info.mapping.virtual;
DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
dev_priv->gart_info.addr,
@@ -2381,7 +2380,7 @@ int r600_cp_dispatch_indirect(struct drm
*/
while (dwords & 0xf) {
u32 *data = (u32 *)
- ((char *)dev->agp_buffer_map->handle
+ ((char *)dev->agp_buffer_map->virtual
+ buf->offset + start);
data[dwords++] = RADEON_CP_PACKET2;
}
@@ -2510,7 +2509,7 @@ int r600_cp_dispatch_texture(struct drm_
/* Dispatch the indirect buffer.
*/
buffer =
- (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+ (u32 *) ((char *)dev->agp_buffer_map->virtual + buf->offset);
if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
@@ -2619,7 +2618,7 @@ int r600_cs_legacy_ioctl(struct drm_devi
DRM_ERROR("ib_get failed\n");
goto out;
}
- ib = (u32 *)((uintptr_t)dev->agp_buffer_map->handle + buf->offset);
+ ib = (u32 *)((uintptr_t)dev->agp_buffer_map->virtual + buf->offset);
/* now parse command stream */
r = r600_cs_legacy(dev, data, fpriv, family, ib, &l);
if (r) {
Index: sys/dev/drm2/radeon/radeon_cp.c
===================================================================
--- sys/dev/drm2/radeon/radeon_cp.c (revision 273255)
+++ sys/dev/drm2/radeon/radeon_cp.c (working copy)
@@ -64,7 +64,7 @@ u32 radeon_read_ring_rptr(drm_radeon_pri
val = DRM_READ32(dev_priv->ring_rptr, off);
} else {
val = *(((volatile u32 *)
- dev_priv->ring_rptr->handle) +
+ dev_priv->ring_rptr->virtual) +
(off / sizeof(u32)));
val = le32_to_cpu(val);
}
@@ -88,7 +88,7 @@ void radeon_write_ring_rptr(drm_radeon_p
if (dev_priv->flags & RADEON_IS_AGP)
DRM_WRITE32(dev_priv->ring_rptr, off, val);
else
- *(((volatile u32 *) dev_priv->ring_rptr->handle) +
+ *(((volatile u32 *) dev_priv->ring_rptr->virtual) +
(off / sizeof(u32))) = cpu_to_le32(val);
}
@@ -1330,9 +1330,9 @@ static int radeon_do_init_cp(struct drm_
drm_core_ioremap_wc(dev_priv->cp_ring, dev);
drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
drm_core_ioremap_wc(dev->agp_buffer_map, dev);
- if (!dev_priv->cp_ring->handle ||
- !dev_priv->ring_rptr->handle ||
- !dev->agp_buffer_map->handle) {
+ if (!dev_priv->cp_ring->virtual ||
+ !dev_priv->ring_rptr->virtual ||
+ !dev->agp_buffer_map->virtual) {
DRM_ERROR("could not find ioremap agp regions!\n");
radeon_do_cleanup_cp(dev);
return -EINVAL;
@@ -1340,19 +1340,19 @@ static int radeon_do_init_cp(struct drm_
} else
#endif
{
- dev_priv->cp_ring->handle =
+ dev_priv->cp_ring->virtual =
(void *)(unsigned long)dev_priv->cp_ring->offset;
- dev_priv->ring_rptr->handle =
+ dev_priv->ring_rptr->virtual =
(void *)(unsigned long)dev_priv->ring_rptr->offset;
- dev->agp_buffer_map->handle =
+ dev->agp_buffer_map->virtual =
(void *)(unsigned long)dev->agp_buffer_map->offset;
- DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
- dev_priv->cp_ring->handle);
- DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
- dev_priv->ring_rptr->handle);
- DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
- dev->agp_buffer_map->handle);
+ DRM_DEBUG("dev_priv->cp_ring->virtual %p\n",
+ dev_priv->cp_ring->virtual);
+ DRM_DEBUG("dev_priv->ring_rptr->virtual %p\n",
+ dev_priv->ring_rptr->virtual);
+ DRM_DEBUG("dev->agp_buffer_map->virtual %p\n",
+ dev->agp_buffer_map->virtual);
}
dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
@@ -1430,8 +1430,8 @@ static int radeon_do_init_cp(struct drm_
DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
dev_priv->gart_buffers_offset);
- dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
- dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ dev_priv->ring.start = (u32 *) dev_priv->cp_ring->virtual;
+ dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->virtual
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
@@ -1467,7 +1467,7 @@ static int radeon_do_init_cp(struct drm_
drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr =
- dev_priv->gart_info.mapping.handle;
+ dev_priv->gart_info.mapping.virtual;
if (dev_priv->flags & RADEON_IS_PCIE)
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
@@ -2139,7 +2139,7 @@ int radeon_master_create(struct drm_devi
free(master_priv, DRM_MEM_DRIVER);
return ret;
}
- master_priv->sarea_priv = (drm_radeon_sarea_t *)((char *)master_priv->sarea->handle) +
+ master_priv->sarea_priv = (drm_radeon_sarea_t *)((char *)master_priv->sarea->virtual) +
sizeof(struct drm_sarea);
master_priv->sarea_priv->pfCurrentPage = 0;
Index: sys/dev/drm2/radeon/radeon_state.c
===================================================================
--- sys/dev/drm2/radeon/radeon_state.c (revision 273255)
+++ sys/dev/drm2/radeon/radeon_state.c (working copy)
@@ -1453,7 +1453,7 @@ void radeon_cp_dispatch_flip(struct drm_
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_master_private *master_priv = master->driver_priv;
- struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle;
+ struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->virtual;
int offset = (master_priv->sarea_priv->pfCurrentPage == 1)
? dev_priv->front_offset : dev_priv->back_offset;
RING_LOCALS;
@@ -1618,7 +1618,7 @@ static void radeon_cp_dispatch_indirect(
*/
if (dwords & 1) {
u32 *data = (u32 *)
- ((char *)dev->agp_buffer_map->handle
+ ((char *)dev->agp_buffer_map->virtual
+ buf->offset + start);
data[dwords++] = RADEON_CP_PACKET2;
}
@@ -1667,7 +1667,7 @@ static void radeon_cp_dispatch_indices(s
dwords = (prim->finish - prim->start + 3) / sizeof(u32);
- data = (u32 *) ((char *)dev->agp_buffer_map->handle +
+ data = (u32 *) ((char *)dev->agp_buffer_map->virtual +
elt_buf->offset + prim->start);
data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
@@ -1819,7 +1819,7 @@ static int radeon_cp_dispatch_texture(st
/* Dispatch the indirect buffer.
*/
buffer =
- (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+ (u32 *) ((char *)dev->agp_buffer_map->virtual + buf->offset);
dwords = size / 4;
#define RADEON_COPY_MT(_buf, _data, _width) \
[-- Attachment #4 --]
Index: sys/dev/drm2/drm_agpsupport.c
===================================================================
--- sys/dev/drm2/drm_agpsupport.c (revision 273255)
+++ sys/dev/drm2/drm_agpsupport.c (working copy)
@@ -396,7 +396,7 @@ void *drm_agp_allocate_memory(size_t pag
if (!agpdev)
return NULL;
- return agp_alloc_memory(agpdev, type, pages << AGP_PAGE_SHIFT);
+ return agp_alloc_memory(agpdev, type, pages << PAGE_SHIFT);
}
int drm_agp_free_memory(void *handle)
Index: sys/dev/drm2/radeon/radeon.h
===================================================================
--- sys/dev/drm2/radeon/radeon.h (revision 273255)
+++ sys/dev/drm2/radeon/radeon.h (working copy)
@@ -1618,6 +1618,7 @@ struct radeon_device {
bool need_dma32;
bool accel_working;
bool fictitious_range_registered;
+ bool fictitious_agp_range_registered;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
Index: sys/dev/drm2/radeon/radeon_device.c
===================================================================
--- sys/dev/drm2/radeon/radeon_device.c (revision 273255)
+++ sys/dev/drm2/radeon/radeon_device.c (working copy)
@@ -1014,6 +1014,7 @@ int radeon_device_init(struct radeon_dev
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->accel_working = false;
rdev->fictitious_range_registered = false;
+ rdev->fictitious_agp_range_registered = false;
/* set up ring ids */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
@@ -1168,6 +1169,24 @@ int radeon_device_init(struct radeon_dev
return (-r);
}
rdev->fictitious_range_registered = true;
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ DRM_INFO("%s: Taking over the fictitious range 0x%jx-0x%jx\n",
+ __func__, (uintmax_t)rdev->mc.agp_base,
+ (uintmax_t)rdev->mc.agp_base + rdev->mc.gtt_size);
+ r = vm_phys_fictitious_reg_range(
+ rdev->mc.agp_base,
+ rdev->mc.agp_base + rdev->mc.gtt_size,
+ VM_MEMATTR_WRITE_COMBINING);
+ if (r != 0) {
+ DRM_ERROR("Failed to register fictitious range "
+ "0x%jx-0x%jx (%d).\n", (uintmax_t)rdev->mc.agp_base,
+ (uintmax_t)rdev->mc.agp_base + rdev->mc.gtt_size, r);
+ return (-r);
+ }
+ rdev->fictitious_agp_range_registered = true;
+ }
+#endif
if ((radeon_testing & 1)) {
radeon_test_moves(rdev);
@@ -1205,6 +1224,13 @@ void radeon_device_fini(struct radeon_de
rdev->mc.aper_base,
rdev->mc.aper_base + rdev->mc.visible_vram_size);
}
+#if __OS_HAS_AGP
+ if (rdev->fictitious_agp_range_registered) {
+ vm_phys_fictitious_unreg_range(
+ rdev->mc.agp_base,
+ rdev->mc.agp_base + rdev->mc.gtt_size);
+ }
+#endif
radeon_fini(rdev);
#ifdef DUMBBELL_WIP
Index: sys/dev/drm2/radeon/radeon_ttm.c
===================================================================
--- sys/dev/drm2/radeon/radeon_ttm.c (revision 273255)
+++ sys/dev/drm2/radeon/radeon_ttm.c (working copy)
@@ -560,12 +560,10 @@ static struct ttm_tt *radeon_ttm_tt_crea
rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
-#ifdef DUMBBELL_WIP
if (rdev->flags & RADEON_IS_AGP) {
return ttm_agp_tt_create(bdev, rdev->ddev->agp->agpdev,
size, page_flags, dummy_read_page);
}
-#endif /* DUMBBELL_WIP */
#endif
gtt = malloc(sizeof(struct radeon_ttm_tt),
@@ -610,11 +608,9 @@ static int radeon_ttm_tt_populate(struct
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
-#ifdef DUMBBELL_WIP
if (rdev->flags & RADEON_IS_AGP) {
return ttm_agp_tt_populate(ttm);
}
-#endif /* DUMBBELL_WIP */
#endif
#ifdef CONFIG_SWIOTLB
@@ -660,12 +656,10 @@ static void radeon_ttm_tt_unpopulate(str
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
-#ifdef DUMBBELL_WIP
if (rdev->flags & RADEON_IS_AGP) {
ttm_agp_tt_unpopulate(ttm);
return;
}
-#endif /* DUMBBELL_WIP */
#endif
#ifdef CONFIG_SWIOTLB
Index: sys/dev/drm2/ttm/ttm_agp_backend.c
===================================================================
--- sys/dev/drm2/ttm/ttm_agp_backend.c (revision 273255)
+++ sys/dev/drm2/ttm/ttm_agp_backend.c (working copy)
@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
#include <dev/drm2/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <dev/drm2/ttm/ttm_placement.h>
+#include <dev/agp/agppriv.h>
struct ttm_agp_backend {
struct ttm_tt ttm;
@@ -55,27 +56,31 @@ static int ttm_agp_bind(struct ttm_tt *t
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
unsigned i;
- mem = agp_alloc_memory(agp_be->bridge, AGP_USER_MEMORY, ttm->num_pages);
- if (unlikely(mem == NULL))
- return -ENOMEM;
+ mem = agp_be->mem;
+ if (mem == NULL) {
+ mem = agp_alloc_memory(agp_be->bridge,
+ (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY,
+ ttm->num_pages * PAGE_SIZE);
+ if (unlikely(mem == NULL))
+ return -ENOMEM;
+ } else if (mem->am_size != ttm->num_pages * PAGE_SIZE) {
+ printf("[TTM] size changed on AGP rebind\n");
+ return -EINVAL;
+ }
- mem->page_count = 0;
for (i = 0; i < ttm->num_pages; i++) {
vm_page_t page = ttm->pages[i];
if (!page)
page = ttm->dummy_read_page;
- mem->pages[mem->page_count++] = page;
+ mem->am_pages[i] = page;
}
agp_be->mem = mem;
- mem->is_flushed = 1;
- mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
-
- ret = agp_bind_memory(mem, node->start);
+ ret = -agp_bind_memory(agp_be->bridge, mem, node->start * PAGE_SIZE);
if (ret)
- pr_err("AGP Bind memory failed\n");
+ printf("[TTM] AGP Bind memory failed\n");
return ret;
}
@@ -85,9 +90,9 @@ static int ttm_agp_unbind(struct ttm_tt
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
if (agp_be->mem) {
- if (agp_be->mem->is_bound)
- return agp_unbind_memory(agp_be->mem);
- agp_free_memory(agp_be->mem);
+ if (agp_be->mem->am_is_bound)
+ return -agp_unbind_memory(agp_be->bridge, agp_be->mem);
+ agp_free_memory(agp_be->bridge, agp_be->mem);
agp_be->mem = NULL;
}
return 0;
@@ -123,6 +128,7 @@ struct ttm_tt *ttm_agp_tt_create(struct
agp_be->ttm.func = &ttm_agp_func;
if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ free(agp_be, M_TTM_AGP);
return NULL;
}
Index: sys/dev/drm2/ttm/ttm_bo_driver.h
===================================================================
--- sys/dev/drm2/ttm/ttm_bo_driver.h (revision 273255)
+++ sys/dev/drm2/ttm/ttm_bo_driver.h (working copy)
@@ -990,9 +990,8 @@ extern vm_memattr_t ttm_io_prot(uint32_t
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
-#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
+#if __OS_HAS_AGP
#define TTM_HAS_AGP
-#include <linux/agp_backend.h>
/**
* ttm_agp_tt_create
@@ -1009,7 +1008,7 @@ extern const struct ttm_mem_type_manager
* bind and unbind memory backing a ttm_tt.
*/
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
- struct agp_bridge_data *bridge,
+ device_t bridge,
unsigned long size, uint32_t page_flags,
struct vm_page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
Index: sys/dev/drm2/ttm/ttm_page_alloc.c
===================================================================
--- sys/dev/drm2/ttm/ttm_page_alloc.c (revision 273255)
+++ sys/dev/drm2/ttm/ttm_page_alloc.c (working copy)
@@ -45,10 +45,6 @@ __FBSDID("$FreeBSD$");
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
-#ifdef TTM_HAS_AGP
-#include <asm/agp.h>
-#endif
-
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION 16
#define FREE_ALL_PAGES (~0U)
@@ -220,46 +216,34 @@ static struct ttm_pool_manager *_manager
static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
- vm_page_t m;
+#ifdef TTM_HAS_AGP
int i;
- for (i = 0; i < addrinarray; i++) {
- m = pages[i];
-#ifdef TTM_HAS_AGP
- unmap_page_from_agp(m);
+ for (i = 0; i < addrinarray; i++)
+ pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_BACK);
#endif
- pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
- }
return 0;
}
static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
- vm_page_t m;
+#ifdef TTM_HAS_AGP
int i;
- for (i = 0; i < addrinarray; i++) {
- m = pages[i];
-#ifdef TTM_HAS_AGP
- map_page_into_agp(pages[i]);
+ for (i = 0; i < addrinarray; i++)
+ pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_COMBINING);
#endif
- pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
- }
return 0;
}
static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
- vm_page_t m;
+#ifdef TTM_HAS_AGP
int i;
- for (i = 0; i < addrinarray; i++) {
- m = pages[i];
-#ifdef TTM_HAS_AGP
- map_page_into_agp(pages[i]);
+ for (i = 0; i < addrinarray; i++)
+ pmap_page_set_memattr(pages[i], VM_MEMATTR_UNCACHEABLE);
#endif
- pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
- }
return 0;
}
[-- Attachment #5 --]
Index: sys/dev/drm2/ttm/ttm_bo_util.c
===================================================================
--- sys/dev/drm2/ttm/ttm_bo_util.c (revision 273255)
+++ sys/dev/drm2/ttm/ttm_bo_util.c (working copy)
@@ -508,7 +508,7 @@ static int ttm_bo_kmap_ttm(struct ttm_bu
* or to make the buffer object look contiguous.
*/
prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
- VM_MEMATTR_WRITE_COMBINING :
+ VM_MEMATTR_WRITE_BACK :
ttm_io_prot(mem->placement);
map->bo_kmap_type = ttm_bo_map_vmap;
map->num_pages = num_pages;
