linux-hardened/drivers/gpu/host1x/drm/gem.c
David Herrmann 0de23977cf drm/gem: convert to new unified vma manager
Use the new vma manager instead of the old hashtable. Also convert all
drivers to use the new convenience helpers. This drops all the
(map_list.hash.key << PAGE_SHIFT) nonsense.

Locking and access management are exactly the same as before, with an
additional lock inside the vma manager that strictly wouldn't be needed
for GEM.
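
For a driver like tegra, the visible change is where the fake mmap offset
comes from: it is now read from the drm_vma_offset node embedded in
struct drm_gem_object instead of being derived from the hashtable entry.
A minimal before/after sketch (the surrounding error handling is
driver-specific and omitted here):

	/* old: offset derived from the hashtable key */
	offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;

	/* new: allocate a node in the vma manager, then read its offset */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		return ret;
	offset = drm_vma_node_offset_addr(&obj->vma_node);

The node lives in obj->vma_node, so drivers no longer manage a separate
map_list entry themselves.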

v2:
 - rebase on drm-next
 - init nodes via drm_vma_node_reset() in drm_gem.c
v3:
 - fix tegra
v4:
 - remove duplicate if (drm_vma_node_has_offset()) checks
 - inline now trivial drm_vma_node_offset_addr() calls
v5:
 - skip node-reset on gem-init due to kzalloc()
 - do not allow mapping gem-objects with offsets (backwards compat)
 - remove unnecessary casts

Cc: Inki Dae <inki.dae@samsung.com>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Acked-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@gmail.com>
2013-07-25 20:47:06 +10:00


/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>

#include "gem.h"

static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_drm_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_unreference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_drm_bo(bo);

	/* buffers are physically contiguous, so pinning just returns paddr */
	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_drm_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_drm_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_drm_bo(bo);
	struct drm_device *drm = obj->gem.dev;

	mutex_lock(&drm->struct_mutex);
	drm_gem_object_reference(&obj->gem);
	mutex_unlock(&drm->struct_mutex);

	return bo;
}

const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
{
	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}

unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
{
	return (unsigned int)drm_vma_node_offset_addr(&bo->gem.vma_node);
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);

	size = round_up(size, PAGE_SIZE);

	bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!bo->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
			size);
		err = -ENOMEM;
		goto err_dma;
	}

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err)
		goto err_init;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err)
		goto err_mmap;

	return bo;

err_mmap:
	drm_gem_object_release(&bo->gem);
err_init:
	tegra_bo_destroy(drm, bo);
err_dma:
	kfree(bo);

	return ERR_PTR(err);
}
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     unsigned int size,
					     unsigned int *handle)
{
	struct tegra_bo *bo;
	int ret;

	bo = tegra_bo_create(drm, size);
	if (IS_ERR(bo))
		return bo;

	ret = drm_gem_handle_create(file, &bo->gem, handle);
	if (ret)
		goto err;

	/* the userspace handle now holds a reference, drop the local one */
	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;

err:
	tegra_bo_free_object(&bo->gem);
	return ERR_PTR(ret);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	drm_gem_free_mmap_offset(gem);
	drm_gem_object_release(gem);
	tegra_bo_destroy(gem->dev, bo);

	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_bo *bo;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = tegra_bo_get_mmap_offset(bo);

	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	/* let the GEM core look up the object and set up the VMA */
	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	/* the buffer is contiguous, so map the whole range in one go */
	ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
			  unsigned int handle)
{
	return drm_gem_handle_delete(file, handle);
}