drm/radeon: embed struct drm_gem_object
Unconditionally initialize the drm gem object - it's not worth the trouble not to for the few kernel objects. This patch only changes the place of the drm gem object, access is still done via pointers. v2: Unconditionally align the size in radeon_bo_create. At least the r600/evergreen blit code didn't do this, angering the paranoid gem code. Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Signed-off-by: Dave Airlie <airlied@redhat.com>
This commit is contained in:
parent
ae0cec2880
commit
441921d530
14 changed files with 39 additions and 37 deletions
|
@ -572,7 +572,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
|
||||||
obj_size += evergreen_ps_size * 4;
|
obj_size += evergreen_ps_size * 4;
|
||||||
obj_size = ALIGN(obj_size, 256);
|
obj_size = ALIGN(obj_size, 256);
|
||||||
|
|
||||||
r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
|
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
|
||||||
&rdev->r600_blit.shader_obj);
|
&rdev->r600_blit.shader_obj);
|
||||||
if (r) {
|
if (r) {
|
||||||
DRM_ERROR("evergreen failed to allocate shader\n");
|
DRM_ERROR("evergreen failed to allocate shader\n");
|
||||||
|
|
|
@ -2728,7 +2728,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
|
||||||
|
|
||||||
/* Allocate ring buffer */
|
/* Allocate ring buffer */
|
||||||
if (rdev->ih.ring_obj == NULL) {
|
if (rdev->ih.ring_obj == NULL) {
|
||||||
r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
|
r = radeon_bo_create(rdev, rdev->ih.ring_size,
|
||||||
PAGE_SIZE, true,
|
PAGE_SIZE, true,
|
||||||
RADEON_GEM_DOMAIN_GTT,
|
RADEON_GEM_DOMAIN_GTT,
|
||||||
&rdev->ih.ring_obj);
|
&rdev->ih.ring_obj);
|
||||||
|
|
|
@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev)
|
||||||
obj_size += r6xx_ps_size * 4;
|
obj_size += r6xx_ps_size * 4;
|
||||||
obj_size = ALIGN(obj_size, 256);
|
obj_size = ALIGN(obj_size, 256);
|
||||||
|
|
||||||
r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
|
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
|
||||||
&rdev->r600_blit.shader_obj);
|
&rdev->r600_blit.shader_obj);
|
||||||
if (r) {
|
if (r) {
|
||||||
DRM_ERROR("r600 failed to allocate shader\n");
|
DRM_ERROR("r600 failed to allocate shader\n");
|
||||||
|
|
|
@ -259,6 +259,7 @@ struct radeon_bo {
|
||||||
/* Constant after initialization */
|
/* Constant after initialization */
|
||||||
struct radeon_device *rdev;
|
struct radeon_device *rdev;
|
||||||
struct drm_gem_object *gobj;
|
struct drm_gem_object *gobj;
|
||||||
|
struct drm_gem_object gem_base;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct radeon_bo_list {
|
struct radeon_bo_list {
|
||||||
|
|
|
@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
|
||||||
|
|
||||||
size = bsize;
|
size = bsize;
|
||||||
n = 1024;
|
n = 1024;
|
||||||
r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
|
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
|
||||||
if (r) {
|
if (r) {
|
||||||
goto out_cleanup;
|
goto out_cleanup;
|
||||||
}
|
}
|
||||||
|
@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
|
||||||
if (r) {
|
if (r) {
|
||||||
goto out_cleanup;
|
goto out_cleanup;
|
||||||
}
|
}
|
||||||
r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
|
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
|
||||||
if (r) {
|
if (r) {
|
||||||
goto out_cleanup;
|
goto out_cleanup;
|
||||||
}
|
}
|
||||||
|
|
|
@ -184,7 +184,7 @@ int radeon_wb_init(struct radeon_device *rdev)
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
if (rdev->wb.wb_obj == NULL) {
|
if (rdev->wb.wb_obj == NULL) {
|
||||||
r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
|
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
|
||||||
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
|
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
|
||||||
if (r) {
|
if (r) {
|
||||||
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
|
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
|
||||||
|
|
|
@ -78,7 +78,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
if (rdev->gart.table.vram.robj == NULL) {
|
if (rdev->gart.table.vram.robj == NULL) {
|
||||||
r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
|
r = radeon_bo_create(rdev, rdev->gart.table_size,
|
||||||
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
|
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
|
||||||
&rdev->gart.table.vram.robj);
|
&rdev->gart.table.vram.robj);
|
||||||
if (r) {
|
if (r) {
|
||||||
|
|
|
@ -32,7 +32,8 @@
|
||||||
|
|
||||||
int radeon_gem_object_init(struct drm_gem_object *obj)
|
int radeon_gem_object_init(struct drm_gem_object *obj)
|
||||||
{
|
{
|
||||||
/* we do nothings here */
|
BUG();
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -44,9 +45,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
|
||||||
if (robj) {
|
if (robj) {
|
||||||
radeon_bo_unref(&robj);
|
radeon_bo_unref(&robj);
|
||||||
}
|
}
|
||||||
|
|
||||||
drm_gem_object_release(gobj);
|
|
||||||
kfree(gobj);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int radeon_gem_object_create(struct radeon_device *rdev, int size,
|
int radeon_gem_object_create(struct radeon_device *rdev, int size,
|
||||||
|
@ -54,29 +52,27 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
|
||||||
bool discardable, bool kernel,
|
bool discardable, bool kernel,
|
||||||
struct drm_gem_object **obj)
|
struct drm_gem_object **obj)
|
||||||
{
|
{
|
||||||
struct drm_gem_object *gobj;
|
|
||||||
struct radeon_bo *robj;
|
struct radeon_bo *robj;
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
*obj = NULL;
|
*obj = NULL;
|
||||||
gobj = drm_gem_object_alloc(rdev->ddev, size);
|
|
||||||
if (!gobj) {
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
/* At least align on page size */
|
/* At least align on page size */
|
||||||
if (alignment < PAGE_SIZE) {
|
if (alignment < PAGE_SIZE) {
|
||||||
alignment = PAGE_SIZE;
|
alignment = PAGE_SIZE;
|
||||||
}
|
}
|
||||||
r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
|
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
|
||||||
if (r) {
|
if (r) {
|
||||||
if (r != -ERESTARTSYS)
|
if (r != -ERESTARTSYS)
|
||||||
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
|
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
|
||||||
size, initial_domain, alignment, r);
|
size, initial_domain, alignment, r);
|
||||||
drm_gem_object_unreference_unlocked(gobj);
|
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
gobj->driver_private = robj;
|
*obj = &robj->gem_base;
|
||||||
*obj = gobj;
|
|
||||||
|
mutex_lock(&rdev->gem.mutex);
|
||||||
|
list_add_tail(&robj->list, &rdev->gem.objects);
|
||||||
|
mutex_unlock(&rdev->gem.mutex);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -55,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
|
||||||
list_del_init(&bo->list);
|
list_del_init(&bo->list);
|
||||||
mutex_unlock(&bo->rdev->gem.mutex);
|
mutex_unlock(&bo->rdev->gem.mutex);
|
||||||
radeon_bo_clear_surface_reg(bo);
|
radeon_bo_clear_surface_reg(bo);
|
||||||
|
drm_gem_object_release(&bo->gem_base);
|
||||||
kfree(bo);
|
kfree(bo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -86,7 +87,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
|
||||||
rbo->placement.num_busy_placement = c;
|
rbo->placement.num_busy_placement = c;
|
||||||
}
|
}
|
||||||
|
|
||||||
int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
|
int radeon_bo_create(struct radeon_device *rdev,
|
||||||
unsigned long size, int byte_align, bool kernel, u32 domain,
|
unsigned long size, int byte_align, bool kernel, u32 domain,
|
||||||
struct radeon_bo **bo_ptr)
|
struct radeon_bo **bo_ptr)
|
||||||
{
|
{
|
||||||
|
@ -96,6 +97,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
|
||||||
unsigned long max_size = 0;
|
unsigned long max_size = 0;
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
|
size = ALIGN(size, PAGE_SIZE);
|
||||||
|
|
||||||
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
|
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
|
||||||
rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
|
rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
|
||||||
}
|
}
|
||||||
|
@ -118,8 +121,14 @@ retry:
|
||||||
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
|
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
|
||||||
if (bo == NULL)
|
if (bo == NULL)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
|
||||||
|
if (unlikely(r)) {
|
||||||
|
kfree(bo);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
bo->rdev = rdev;
|
bo->rdev = rdev;
|
||||||
bo->gobj = gobj;
|
bo->gobj = &bo->gem_base;
|
||||||
|
bo->gem_base.driver_private = bo;
|
||||||
bo->surface_reg = -1;
|
bo->surface_reg = -1;
|
||||||
INIT_LIST_HEAD(&bo->list);
|
INIT_LIST_HEAD(&bo->list);
|
||||||
radeon_ttm_placement_from_domain(bo, domain);
|
radeon_ttm_placement_from_domain(bo, domain);
|
||||||
|
@ -142,12 +151,9 @@ retry:
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
*bo_ptr = bo;
|
*bo_ptr = bo;
|
||||||
if (gobj) {
|
|
||||||
mutex_lock(&bo->rdev->gem.mutex);
|
|
||||||
list_add_tail(&bo->list, &rdev->gem.objects);
|
|
||||||
mutex_unlock(&bo->rdev->gem.mutex);
|
|
||||||
}
|
|
||||||
trace_radeon_bo_create(bo);
|
trace_radeon_bo_create(bo);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -137,10 +137,9 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
|
||||||
}
|
}
|
||||||
|
|
||||||
extern int radeon_bo_create(struct radeon_device *rdev,
|
extern int radeon_bo_create(struct radeon_device *rdev,
|
||||||
struct drm_gem_object *gobj, unsigned long size,
|
unsigned long size, int byte_align,
|
||||||
int byte_align,
|
bool kernel, u32 domain,
|
||||||
bool kernel, u32 domain,
|
struct radeon_bo **bo_ptr);
|
||||||
struct radeon_bo **bo_ptr);
|
|
||||||
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
|
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
|
||||||
extern void radeon_bo_kunmap(struct radeon_bo *bo);
|
extern void radeon_bo_kunmap(struct radeon_bo *bo);
|
||||||
extern void radeon_bo_unref(struct radeon_bo **bo);
|
extern void radeon_bo_unref(struct radeon_bo **bo);
|
||||||
|
|
|
@ -175,7 +175,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
|
||||||
return 0;
|
return 0;
|
||||||
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
|
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
|
||||||
/* Allocate 1M object buffer */
|
/* Allocate 1M object buffer */
|
||||||
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
|
r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
|
||||||
PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
|
PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
|
||||||
&rdev->ib_pool.robj);
|
&rdev->ib_pool.robj);
|
||||||
if (r) {
|
if (r) {
|
||||||
|
@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
|
||||||
rdev->cp.ring_size = ring_size;
|
rdev->cp.ring_size = ring_size;
|
||||||
/* Allocate ring buffer */
|
/* Allocate ring buffer */
|
||||||
if (rdev->cp.ring_obj == NULL) {
|
if (rdev->cp.ring_obj == NULL) {
|
||||||
r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
|
r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
|
||||||
RADEON_GEM_DOMAIN_GTT,
|
RADEON_GEM_DOMAIN_GTT,
|
||||||
&rdev->cp.ring_obj);
|
&rdev->cp.ring_obj);
|
||||||
if (r) {
|
if (r) {
|
||||||
|
|
|
@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
|
||||||
goto out_cleanup;
|
goto out_cleanup;
|
||||||
}
|
}
|
||||||
|
|
||||||
r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
|
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
|
||||||
&vram_obj);
|
&vram_obj);
|
||||||
if (r) {
|
if (r) {
|
||||||
DRM_ERROR("Failed to create VRAM object\n");
|
DRM_ERROR("Failed to create VRAM object\n");
|
||||||
|
@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
|
||||||
void **gtt_start, **gtt_end;
|
void **gtt_start, **gtt_end;
|
||||||
void **vram_start, **vram_end;
|
void **vram_start, **vram_end;
|
||||||
|
|
||||||
r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
|
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
|
||||||
RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
|
RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
|
||||||
if (r) {
|
if (r) {
|
||||||
DRM_ERROR("Failed to create GTT object %d\n", i);
|
DRM_ERROR("Failed to create GTT object %d\n", i);
|
||||||
|
|
|
@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
|
||||||
DRM_ERROR("Failed initializing VRAM heap.\n");
|
DRM_ERROR("Failed initializing VRAM heap.\n");
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
|
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
|
||||||
RADEON_GEM_DOMAIN_VRAM,
|
RADEON_GEM_DOMAIN_VRAM,
|
||||||
&rdev->stollen_vga_memory);
|
&rdev->stollen_vga_memory);
|
||||||
if (r) {
|
if (r) {
|
||||||
|
|
|
@ -999,7 +999,7 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
|
||||||
u64 gpu_addr;
|
u64 gpu_addr;
|
||||||
|
|
||||||
if (rdev->vram_scratch.robj == NULL) {
|
if (rdev->vram_scratch.robj == NULL) {
|
||||||
r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
|
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
|
||||||
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
|
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
|
||||||
&rdev->vram_scratch.robj);
|
&rdev->vram_scratch.robj);
|
||||||
if (r) {
|
if (r) {
|
||||||
|
|
Loading…
Reference in a new issue