Merge tag 'drm-intel-next-2016-08-08' of git://anongit.freedesktop.org/drm-intel into drm-next
- refactor ddi buffer programming a bit (Ville)
- large-scale renaming to untangle naming in the gem code (Chris)
- rework vma/active tracking for accurately reaping idle mappings of shared objects (Chris)
- misc dp sst/mst probing corner case fixes (Ville)
- tons of cleanup&tunings all around in gem
- lockless (rcu-protected) request lookup, plus use it everywhere for non(b)locking waits (Chris)
- pipe crc debugfs fixes (Rodrigo)
- random fixes all over

* tag 'drm-intel-next-2016-08-08' of git://anongit.freedesktop.org/drm-intel: (222 commits)
  drm/i915: Update DRIVER_DATE to 20160808
  drm/i915: fix aliasing_ppgtt leak
  drm/i915: Update comment before i915_spin_request
  drm/i915: Use drm official vblank_no_hw_counter callback.
  drm/i915: Fix copy_to_user usage for pipe_crc
  Revert "drm/i915: Track active streams also for DP SST"
  drm/i915: fix WaInsertDummyPushConstPs
  drm/i915: Assert that the request hasn't been retired
  drm/i915: Repack fence tiling mode and stride into a single integer
  drm/i915: Document and reject invalid tiling modes
  drm/i915: Remove locking for get_tiling
  drm/i915: Remove pinned check from madvise ioctl
  drm/i915: Reduce locking inside swfinish ioctl
  drm/i915: Remove (struct_mutex) locking for busy-ioctl
  drm/i915: Remove (struct_mutex) locking for wait-ioctl
  drm/i915: Do a nonblocking wait first in pread/pwrite
  drm/i915: Remove unused no-shrinker-steal
  drm/i915: Tidy generation of the GTT mmap offset
  drm/i915/shrinker: Wait before acquiring struct_mutex under oom
  drm/i915: Simplify do_idling() (Ironlake vt-d w/a)
  ...
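The "lockless (rcu-protected) request lookup" item above is the easiest one to misread, so a minimal sketch of the general pattern follows: resolve a pointer without taking the writer's lock, then validate it by acquiring a reference that fails once teardown has begun. This is only an illustration, not the i915 code contained in this merge; all names are hypothetical, and portable C11 atomics stand in for the kernel's RCU and kref helpers (in the kernel the equivalent building blocks are roughly rcu_read_lock()/rcu_dereference() plus a kref_get_unless_zero()-style acquisition before the nonblocking wait).

/* Illustrative sketch only -- not the driver code from this merge. */
#include <stdatomic.h>
#include <stddef.h>

struct request {
	atomic_uint refcount;   /* 0 means the request is being torn down */
	unsigned int seqno;
};

static _Atomic(struct request *) current_request;

/* Writer side: publish a new request with release semantics. */
static void publish_request(struct request *rq)
{
	atomic_store_explicit(&current_request, rq, memory_order_release);
}

/* Reader side: lockless lookup; returns NULL if the request is going away. */
static struct request *lookup_request(void)
{
	struct request *rq =
		atomic_load_explicit(&current_request, memory_order_acquire);
	unsigned int ref;

	if (!rq)
		return NULL;

	/* Only keep the request if its refcount is still non-zero. */
	ref = atomic_load_explicit(&rq->refcount, memory_order_relaxed);
	do {
		if (ref == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&rq->refcount, &ref, ref + 1));

	return rq;	/* caller now owns a reference and may wait on it */
}

int main(void)
{
	static struct request rq = { .refcount = 1, .seqno = 42 };

	publish_request(&rq);
	return lookup_request() ? 0 : 1;
}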
commit fc93ff608b
62 changed files with 6183 additions and 5902 deletions
@@ -70,6 +70,9 @@ Frontbuffer Tracking
.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
:doc: frontbuffer tracking
.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.h
:internal:
.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
:internal:
@@ -25,7 +25,6 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
i915-y += i915_cmd_parser.o \
i915_gem_batch_pool.o \
i915_gem_context.o \
i915_gem_debug.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \

@@ -33,6 +32,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_gtt.o \
i915_gem.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \

@@ -40,6 +40,7 @@ i915-y += i915_cmd_parser.o \
i915_gpu_error.o \
i915_trace_points.o \
intel_breadcrumbs.o \
intel_engine_cs.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
@@ -62,23 +62,23 @@
* The parser always rejects such commands.
*
* The majority of the problematic commands fall in the MI_* range, with only a
* few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
* few specific commands on each engine (e.g. PIPE_CONTROL and MI_FLUSH_DW).
*
* Implementation:
* Each ring maintains tables of commands and registers which the parser uses in
* scanning batch buffers submitted to that ring.
* Each engine maintains tables of commands and registers which the parser
* uses in scanning batch buffers submitted to that engine.
*
* Since the set of commands that the parser must check for is significantly
* smaller than the number of commands supported, the parser tables contain only
* those commands required by the parser. This generally works because command
* opcode ranges have standard command length encodings. So for commands that
* the parser does not need to check, it can easily skip them. This is
* implemented via a per-ring length decoding vfunc.
* implemented via a per-engine length decoding vfunc.
*
* Unfortunately, there are a number of commands that do not follow the standard
* length encoding for their opcode range, primarily amongst the MI_* commands.
* To handle this, the parser provides a way to define explicit "skip" entries
* in the per-ring command tables.
* in the per-engine command tables.
*
* Other command table entries map fairly directly to high level categories
* mentioned above: rejected, master-only, register whitelist. The parser

@@ -603,7 +603,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
static bool validate_cmds_sorted(struct intel_engine_cs *engine,
static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{

@@ -624,8 +624,10 @@ static bool validate_cmds_sorted(struct intel_engine_cs *engine,
u32 curr = desc->cmd.value & desc->cmd.mask;
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
engine->id, i, j, curr, previous);
DRM_ERROR("CMD: %s [%d] command table not sorted: "
"table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
engine->name, engine->id,
i, j, curr, previous);
ret = false;
}

@@ -636,7 +638,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *engine,
return ret;
}
static bool check_sorted(int ring_id,
static bool check_sorted(const struct intel_engine_cs *engine,
const struct drm_i915_reg_descriptor *reg_table,
int reg_count)
{

@@ -648,8 +650,10 @@ static bool check_sorted(int ring_id,
u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
ring_id, i, curr, previous);
DRM_ERROR("CMD: %s [%d] register table not sorted: "
"entry=%d reg=0x%08X prev=0x%08X\n",
engine->name, engine->id,
i, curr, previous);
ret = false;
}

@@ -666,7 +670,7 @@ static bool validate_regs_sorted(struct intel_engine_cs *engine)
for (i = 0; i < engine->reg_table_count; i++) {
table = &engine->reg_tables[i];
if (!check_sorted(engine->id, table->regs, table->num_regs))
if (!check_sorted(engine, table->regs, table->num_regs))
return false;
}

@@ -736,7 +740,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
}
/**
* i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
* intel_engine_init_cmd_parser() - set cmd parser related fields for an engine
* @engine: the engine to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the

@@ -745,7 +749,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
*
* Return: non-zero if initialization fails
*/
int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
{
const struct drm_i915_cmd_table *cmd_tables;
int cmd_table_count;

@@ -806,8 +810,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
engine->id);
MISSING_CASE(engine->id);
BUG();
}

@@ -829,13 +832,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
}
/**
* i915_cmd_parser_fini_ring() - clean up cmd parser related fields
* intel_engine_cleanup_cmd_parser() - clean up cmd parser related fields
* @engine: the engine to clean up
*
* Releases any resources related to command parsing that may have been
* initialized for the specified ring.
* initialized for the specified engine.
*/
void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
if (!engine->needs_cmd_parser)
return;

@@ -866,9 +869,9 @@ find_cmd_in_table(struct intel_engine_cs *engine,
* Returns a pointer to a descriptor for the command specified by cmd_header.
*
* The caller must supply space for a default descriptor via the default_desc
* parameter. If no descriptor for the specified command exists in the ring's
* parameter. If no descriptor for the specified command exists in the engine's
* command parser tables, this function fills in default_desc based on the
* ring's default length encoding and returns default_desc.
* engine's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *engine,

@@ -1023,15 +1026,16 @@ unpin_src:
}
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
* intel_engine_needs_cmd_parser() - should a given engine use software
* command parsing?
* @engine: the engine in question
*
* Only certain platforms require software batch buffer command parsing, and
* only when enabled via module parameter.
*
* Return: true if the ring requires software command parsing
* Return: true if the engine requires software command parsing
*/
bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
{
if (!engine->needs_cmd_parser)
return false;

@@ -1078,8 +1082,8 @@ static bool check_cmd(const struct intel_engine_cs *engine,
reg_addr);
if (!reg) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
reg_addr, *cmd, engine->id);
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (exec_id=%d)\n",
reg_addr, *cmd, engine->exec_id);
return false;
}

@@ -1159,11 +1163,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
desc->bits[i].mask;
if (dword != desc->bits[i].expected) {
DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (exec_id=%d)\n",
*cmd,
desc->bits[i].mask,
desc->bits[i].expected,
dword, engine->id);
dword, engine->exec_id);
return false;
}
}

@@ -1189,12 +1193,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
int i915_parse_cmds(struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
u32 batch_len,
bool is_master)
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
u32 batch_len,
bool is_master)
{
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };

@@ -1295,7 +1299,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
/* If the command parser is not enabled, report 0 - unsupported */
for_each_engine(engine, dev_priv) {
if (i915_needs_cmd_parser(engine)) {
if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
}
@@ -91,7 +91,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
static char get_active_flag(struct drm_i915_gem_object *obj)
{
return obj->active ? '*' : ' ';
return i915_gem_object_is_active(obj) ? '*' : ' ';
}
static char get_pin_flag(struct drm_i915_gem_object *obj)

@@ -101,7 +101,7 @@ static char get_pin_flag(struct drm_i915_gem_object *obj)
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (obj->tiling_mode) {
switch (i915_gem_object_get_tiling(obj)) {
default:
case I915_TILING_NONE: return ' ';
case I915_TILING_X: return 'X';

@@ -125,7 +125,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
size += vma->node.size;
}

@@ -138,6 +138,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
struct i915_vma *vma;
unsigned int frontbuffer_bits;
int pin_count = 0;
enum intel_engine_id id;

@@ -155,17 +156,20 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.write_domain);
for_each_engine_id(engine, dev_priv, id)
seq_printf(m, "%x ",
i915_gem_request_get_seqno(obj->last_read_req[id]));
i915_gem_active_get_seqno(&obj->last_read[id],
&obj->base.dev->struct_mutex));
seq_printf(m, "] %x %x%s%s%s",
i915_gem_request_get_seqno(obj->last_write_req),
i915_gem_request_get_seqno(obj->last_fenced_req),
i915_gem_active_get_seqno(&obj->last_write,
&obj->base.dev->struct_mutex),
i915_gem_active_get_seqno(&obj->last_fence,
&obj->base.dev->struct_mutex),
i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->pin_count > 0)
if (i915_vma_is_pinned(vma))
pin_count++;
}
seq_printf(m, " (pinned x %d)", pin_count);

@@ -174,10 +178,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
vma->is_ggtt ? "g" : "pp",
i915_vma_is_ggtt(vma) ? "g" : "pp",
vma->node.start, vma->node.size);
if (vma->is_ggtt)
if (i915_vma_is_ggtt(vma))
seq_printf(m, ", type: %u", vma->ggtt_view.type);
seq_puts(m, ")");
}

@@ -192,11 +199,15 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
*t = '\0';
seq_printf(m, " (%s mappable)", s);
}
if (obj->last_write_req != NULL)
seq_printf(m, " (%s)",
i915_gem_request_get_engine(obj->last_write_req)->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
engine = i915_gem_active_get_engine(&obj->last_write,
&obj->base.dev->struct_mutex);
if (engine)
seq_printf(m, " (%s)", engine->name);
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
if (frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)

@@ -338,47 +349,30 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
if (!obj->bind_count)
stats->unbound += obj->base.size;
if (obj->base.name || obj->base.dma_buf)
stats->shared += obj->base.size;
if (USES_FULL_PPGTT(obj->base.dev)) {
list_for_each_entry(vma, &obj->vma_list, obj_link) {
struct i915_hw_ppgtt *ppgtt;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
if (!drm_mm_node_allocated(&vma->node))
if (i915_vma_is_ggtt(vma)) {
stats->global += vma->node.size;
} else {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
if (ppgtt->base.file != stats->file_priv)
continue;
if (vma->is_ggtt) {
stats->global += obj->base.size;
continue;
}
ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
if (ppgtt->file_priv != stats->file_priv)
continue;
if (obj->active) /* XXX per-vma statistic */
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
return 0;
}
} else {
if (i915_gem_obj_ggtt_bound(obj)) {
stats->global += obj->base.size;
if (obj->active)
stats->active += obj->base.size;
else
stats->inactive += obj->base.size;
return 0;
}
if (i915_vma_is_active(vma))
stats->active += vma->node.size;
else
stats->inactive += vma->node.size;
}
if (!list_empty(&obj->global_list))
stats->unbound += obj->base.size;
return 0;
}

@@ -425,8 +419,8 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
if (ctx->engine[n].state)
per_file_stats(0, ctx->engine[n].state, data);
if (ctx->engine[n].ringbuf)
per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
if (ctx->engine[n].ring)
per_file_stats(0, ctx->engine[n].ring->obj, data);
}
return 0;

@@ -754,13 +748,13 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
int count;
count = 0;
list_for_each_entry(req, &engine->request_list, list)
list_for_each_entry(req, &engine->request_list, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
list_for_each_entry(req, &engine->request_list, list) {
list_for_each_entry(req, &engine->request_list, link) {
struct task_struct *task;
rcu_read_lock();

@@ -768,7 +762,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
if (req->pid)
task = pid_task(req->pid, PIDTYPE_PID);
seq_printf(m, " %x @ %d: %s [%d]\n",
req->seqno,
req->fence.seqno,
(int) (jiffies - req->emitted_jiffies),
task ? task->comm : "<unknown>",
task ? task->pid : -1);

@@ -1205,8 +1199,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
if (IS_GEN5(dev)) {
u16 rgvswctl = I915_READ16(MEMSWCTL);
u16 rgvstat = I915_READ16(MEMSTAT_ILK);

@@ -1381,6 +1373,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
seq_printf(m, "Min freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
seq_printf(m, "Boost freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
seq_printf(m, "Max freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
seq_printf(m,

@@ -1419,7 +1413,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
for_each_engine_id(engine, dev_priv, id) {
acthd[id] = intel_ring_get_active_head(engine);
acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
}

@@ -1602,6 +1596,7 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
unsigned forcewake_count;
int count = 0, ret;

@@ -1629,6 +1624,10 @@ static int gen6_drpc_info(struct seq_file *m)
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
if (INTEL_INFO(dev)->gen >= 9) {
gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
}
mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev_priv->rps.hw_lock);
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);

@@ -1647,6 +1646,12 @@ static int gen6_drpc_info(struct seq_file *m)
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
seq_printf(m, "RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
if (INTEL_INFO(dev)->gen >= 9) {
seq_printf(m, "Render Well Gating Enabled: %s\n",
yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
seq_printf(m, "Media Well Gating Enabled: %s\n",
yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
}
seq_printf(m, "Deep RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
seq_printf(m, "Deepest RC6 Enabled: %s\n",

@@ -1675,6 +1680,14 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "Core Power Down: %s\n",
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
if (INTEL_INFO(dev)->gen >= 9) {
seq_printf(m, "Render Power Well: %s\n",
(gen9_powergate_status &
GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Media Power Well: %s\n",
(gen9_powergate_status &
GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
}
/* Not exactly sure what this is */
seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",

@@ -1692,7 +1705,7 @@ static int gen6_drpc_info(struct seq_file *m)
GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
seq_printf(m, "RC6++ voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
return 0;
return i915_forcewake_domains(m, NULL);
}
static int i915_drpc_info(struct seq_file *m, void *unused)

@@ -1896,8 +1909,6 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
goto out;

@@ -2019,12 +2030,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
static void describe_ctx_ringbuf(struct seq_file *m,
struct intel_ringbuffer *ringbuf)
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
ringbuf->space, ringbuf->head, ringbuf->tail,
ringbuf->last_retired_head);
ring->space, ring->head, ring->tail,
ring->last_retired_head);
}
static int i915_context_status(struct seq_file *m, void *unused)

@@ -2068,8 +2078,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ce->initialised ? 'I' : 'i');
if (ce->state)
describe_obj(m, ce->state);
if (ce->ringbuf)
describe_ctx_ringbuf(m, ce->ringbuf);
if (ce->ring)
describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n');
}

@@ -2467,13 +2477,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
list_empty(&file_priv->rps.link) ? "" : ", active");
rcu_read_unlock();
}
seq_printf(m, "Semaphore boosts: %d%s\n",
dev_priv->rps.semaphores.boosts,
list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
seq_printf(m, "MMIO flip boosts: %d%s\n",
dev_priv->rps.mmioflips.boosts,
list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
spin_unlock(&dev_priv->rps.client_lock);
mutex_unlock(&dev->filelist_mutex);

@@ -3228,7 +3232,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
enum intel_engine_id id;
int j, ret;
if (!i915_semaphore_is_enabled(dev_priv)) {
if (!i915.semaphores) {
seq_puts(m, "Semaphores are disabled\n");
return 0;
}

@@ -3621,7 +3625,6 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
while (n_entries > 0) {
struct intel_pipe_crc_entry *entry =
&pipe_crc->entries[pipe_crc->tail];
int ret;
if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
INTEL_PIPE_CRC_ENTRIES_NR) < 1)

@@ -3638,8 +3641,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
spin_unlock_irq(&pipe_crc->lock);
ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
if (ret == PIPE_CRC_LINE_LEN)
if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
return -EFAULT;
user_buf += PIPE_CRC_LINE_LEN;

@@ -4921,7 +4923,7 @@ i915_drop_caches_set(void *data, u64 val)
return ret;
if (val & DROP_ACTIVE) {
ret = i915_gem_wait_for_idle(dev_priv);
ret = i915_gem_wait_for_idle(dev_priv, true);
if (ret)
goto unlock;
}

@@ -4950,20 +4952,11 @@ i915_max_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
}

@@ -4978,8 +4971,6 @@ i915_max_freq_set(void *data, u64 val)
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);

@@ -5017,20 +5008,11 @@ i915_min_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (INTEL_INFO(dev)->gen < 6)
if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
}

@@ -5042,11 +5024,9 @@ i915_min_freq_set(void *data, u64 val)
u32 hw_max, hw_min;
int ret;
if (INTEL_INFO(dev)->gen < 6)
if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);

@@ -5268,7 +5248,8 @@ static void broadwell_sseu_device_status(struct drm_device *dev,
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
struct drm_device *dev = &dev_priv->drm;
struct sseu_dev_status stat;
if (INTEL_INFO(dev)->gen < 8)

@@ -5298,6 +5279,9 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
seq_puts(m, "SSEU Device Status\n");
memset(&stat, 0, sizeof(stat));
intel_runtime_pm_get(dev_priv);
if (IS_CHERRYVIEW(dev)) {
cherryview_sseu_device_status(dev, &stat);
} else if (IS_BROADWELL(dev)) {

@@ -5305,6 +5289,9 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
} else if (INTEL_INFO(dev)->gen >= 9) {
gen9_sseu_device_status(dev, &stat);
}
intel_runtime_pm_put(dev_priv);
seq_printf(m, " Enabled Slice Total: %u\n",
stat.slice_total);
seq_printf(m, " Enabled Subslice Total: %u\n",
@@ -228,27 +228,6 @@ static void intel_detect_pch(struct drm_device *dev)
pci_dev_put(pch);
}
bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) < 6)
return false;
if (i915.semaphores >= 0)
return i915.semaphores;
/* TODO: make semaphores and Execlists play nicely together */
if (i915.enable_execlists)
return false;
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
return false;
#endif
return true;
}
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{

@@ -324,7 +303,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1;
break;
case I915_PARAM_HAS_SEMAPHORES:
value = i915_semaphore_is_enabled(dev_priv);
value = i915.semaphores;
break;
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;

@@ -999,6 +978,9 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
i915.enable_ppgtt =
intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores));
}
/**

@@ -1011,8 +993,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t aperture_size;
int ret;
if (i915_inject_load_failure())

@@ -1022,16 +1002,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
ret = i915_ggtt_init_hw(dev);
ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
return ret;
ret = i915_ggtt_enable_hw(dev);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
goto out_ggtt;
}
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
ret = i915_kick_out_firmware_fb(dev_priv);

@@ -1046,6 +1020,16 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
goto out_ggtt;
}
ret = i915_ggtt_init_hw(dev_priv);
if (ret)
return ret;
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
goto out_ggtt;
}
pci_set_master(dev->pdev);
/* overlay on gen2 is broken and can't address above 1G */

@@ -1058,7 +1042,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
}
}
/* 965GM sometimes incorrectly writes to hardware status page (HWS)
* using 32bit addressing, overwriting memory if HWS is located
* above 4GB.

@@ -1077,19 +1060,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
}
}
aperture_size = ggtt->mappable_end;
ggtt->mappable =
io_mapping_create_wc(ggtt->mappable_base,
aperture_size);
if (!ggtt->mappable) {
ret = -EIO;
goto out_ggtt;
}
ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
aperture_size);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);

@@ -1118,7 +1088,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
return 0;
out_ggtt:
i915_ggtt_cleanup_hw(dev);
i915_ggtt_cleanup_hw(dev_priv);
return ret;
}

@@ -1130,15 +1100,12 @@ out_ggtt:
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
arch_phys_wc_del(ggtt->mtrr);
io_mapping_free(ggtt->mappable);
i915_ggtt_cleanup_hw(dev);
i915_ggtt_cleanup_hw(dev_priv);
}
/**

@@ -1343,7 +1310,7 @@ void i915_driver_unload(struct drm_device *dev)
i915_destroy_error_state(dev);
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
drain_workqueue(dev_priv->wq);
intel_guc_fini(dev);
i915_gem_fini(dev);

@@ -1458,8 +1425,6 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_guc_suspend(dev);
intel_suspend_gt_powersave(dev_priv);
intel_display_suspend(dev);
intel_dp_mst_suspend(dev);

@@ -1586,15 +1551,13 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(dev_priv);
ret = i915_ggtt_enable_hw(dev);
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
intel_csr_ucode_resume(dev_priv);
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_resume(dev);
i915_restore_state(dev);
intel_opregion_setup(dev_priv);

@@ -1652,6 +1615,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
intel_autoenable_gt_powersave(dev_priv);
drm_kms_helper_poll_enable(dev);
enable_rpm_wakeref_asserts(dev_priv);

@@ -1778,8 +1742,6 @@ int i915_reset(struct drm_i915_private *dev_priv)
unsigned reset_counter;
int ret;
intel_reset_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */

@@ -1835,8 +1797,7 @@ int i915_reset(struct drm_i915_private *dev_priv)
* previous concerns that it doesn't respond well to some forms
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
intel_enable_gt_powersave(dev_priv);
intel_autoenable_gt_powersave(dev_priv);
return 0;

@@ -2462,7 +2423,6 @@ static int intel_runtime_resume(struct device *device)
* we can do is to hope that things will still work (and disable RPM).
*/
i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);

@@ -2618,6 +2578,7 @@ static struct drm_driver driver = {
.postclose = i915_driver_postclose,
.set_busid = drm_pci_set_busid,
.gem_close_object = i915_gem_close_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
@@ -61,6 +61,7 @@
#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
#include "intel_gvt.h"

@@ -69,7 +70,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20160711"
#define DRIVER_DATE "20160808"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */

@@ -401,7 +402,7 @@ struct drm_i915_file_private {
unsigned boosts;
} rps;
unsigned int bsd_ring;
unsigned int bsd_engine;
};
/* Used by dp and fdi links */

@@ -431,8 +432,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
#define WATCH_LISTS 0
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;

@@ -511,13 +510,13 @@ struct drm_i915_error_state {
struct intel_display_error_state *display;
struct drm_i915_error_object *semaphore_obj;
struct drm_i915_error_ring {
bool valid;
struct drm_i915_error_engine {
int engine_id;
/* Software tracked state */
bool waiting;
int num_waiters;
int hangcheck_score;
enum intel_ring_hangcheck_action hangcheck_action;
enum intel_engine_hangcheck_action hangcheck_action;
int num_requests;
/* our own tracking of ring head and tail */

@@ -577,7 +576,7 @@ struct drm_i915_error_state {
pid_t pid;
char comm[TASK_COMM_LEN];
} ring[I915_NUM_ENGINES];
} engine[I915_NUM_ENGINES];
struct drm_i915_error_buffer {
u32 size;

@@ -592,7 +591,7 @@ struct drm_i915_error_state {
u32 dirty:1;
u32 purgeable:1;
u32 userptr:1;
s32 ring:4;
s32 engine:4;
u32 cache_level:3;
} **active_bo, **pinned_bo;

@@ -893,7 +892,7 @@ struct i915_gem_context {
struct intel_context {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
struct intel_ring *ring;
struct i915_vma *lrc_vma;
uint32_t *lrc_reg_state;
u64 lrc_desc;

@@ -908,6 +907,7 @@ struct i915_gem_context {
struct list_head link;
u8 remap_slice;
bool closed:1;
};
enum fb_op_origin {

@@ -1173,6 +1173,7 @@ struct intel_gen6_power_mgmt {
u8 max_freq_softlimit; /* Max frequency permitted by the driver */
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
u8 min_freq; /* AKA RPn. Minimum frequency */
u8 boost_freq; /* Frequency to request when wait boosting */
u8 idle_freq; /* Frequency to request when we are idle */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */

@@ -1190,11 +1191,9 @@ struct intel_gen6_power_mgmt {
bool client_boost;
bool enabled;
struct delayed_work delayed_resume_work;
struct delayed_work autoenable_work;
unsigned boosts;
struct intel_rps_client semaphores, mmioflips;
/* manual wa residency calculations */
struct intel_rps_ei up_ei, down_ei;

@@ -1319,7 +1318,6 @@ struct i915_gem_mm {
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
bool shrinker_no_lock_stealing;
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;

@@ -1331,7 +1329,7 @@ struct i915_gem_mm {
bool interruptible;
/* the indicator for dispatch video commands on two BSD rings */
unsigned int bsd_ring_dispatch_index;
unsigned int bsd_engine_dispatch_index;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;

@@ -1670,7 +1668,7 @@ struct intel_pipe_crc {
};
struct i915_frontbuffer_tracking {
struct mutex lock;
spinlock_t lock;
/*
* Tracking bits for delayed frontbuffer flushing du to gpu activity or

@@ -1705,18 +1703,6 @@ struct i915_virtual_gpu {
bool active;
};
struct i915_execbuffer_params {
struct drm_device *dev;
struct drm_file *file;
uint32_t dispatch_flags;
uint32_t args_batch_start_offset;
uint64_t batch_obj_vm_offset;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch_obj;
struct i915_gem_context *ctx;
struct drm_i915_gem_request *request;
};
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;

@@ -1769,7 +1755,7 @@ struct drm_i915_private {
struct i915_gem_context *kernel_context;
struct intel_engine_cs engine[I915_NUM_ENGINES];
struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
u32 next_seqno;
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;

@@ -2016,12 +2002,7 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*execbuf_submit)(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
int (*init_engines)(struct drm_device *dev);
void (*cleanup_engine)(struct intel_engine_cs *engine);
void (*stop_engine)(struct intel_engine_cs *engine);
/**
* Is the GPU currently considered idle, or busy executing

@@ -2144,8 +2125,6 @@ struct drm_i915_gem_object_ops {
*/
#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER_BITS \
(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \

@@ -2169,18 +2148,21 @@ struct drm_i915_gem_object {
struct drm_mm_node *stolen;
struct list_head global_list;
struct list_head engine_list[I915_NUM_ENGINES];
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
struct list_head batch_pool_link;
unsigned long flags;
/**
* This is set if the object is on the active lists (has pending
* rendering and so a non-zero seqno), and is not set if it is on
* inactive (ready to be unbound) list.
*/
unsigned int active:I915_NUM_ENGINES;
#define I915_BO_ACTIVE_SHIFT 0
#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
#define __I915_BO_ACTIVE(bo) \
((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
/**
* This is set if the object has been written to since last bound

@@ -2200,10 +2182,6 @@ struct drm_i915_gem_object {
*/
unsigned int madv:2;
/**
* Current tiling mode for the object.
*/
unsigned int tiling_mode:2;
/**
* Whether the tiling parameters for the currently associated fence
* register have changed. Note that for the purposes of tracking

@@ -2234,9 +2212,17 @@ struct drm_i915_gem_object {
unsigned int cache_level:3;
unsigned int cache_dirty:1;
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
atomic_t frontbuffer_bits;
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)
unsigned int has_wc_mmap;
/** Count of VMA actually bound by this object */
unsigned int bind_count;
unsigned int pin_display;
struct sg_table *pages;

@@ -2256,14 +2242,10 @@ struct drm_i915_gem_object {
* requests on one ring where the write request is older than the
* read request. This allows for the CPU to read from an active
* buffer by only waiting for the write to complete.
* */
struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
struct drm_i915_gem_request *last_write_req;
/** Breadcrumb of last fenced GPU access to the buffer. */
struct drm_i915_gem_request *last_fenced_req;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
*/
struct i915_gem_active last_read[I915_NUM_ENGINES];
struct i915_gem_active last_write;
struct i915_gem_active last_fence;
/** References from framebuffers, locks out tiling changes. */
unsigned long framebuffer_references;

@@ -2287,7 +2269,56 @@ struct drm_i915_gem_object {
} userptr;
};
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
/* Assert that to_intel_bo(NULL) == NULL */
BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
return container_of(gem, struct drm_i915_gem_object, base);
}
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
return to_intel_bo(drm_gem_object_lookup(file, handle));
}
__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);
__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
drm_gem_object_reference(&obj->base);
return obj;
}
__deprecated
extern void drm_gem_object_reference(struct drm_gem_object *);
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
drm_gem_object_unreference(&obj->base);
}
__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);
__attribute__((nonnull))
static inline void
i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
{
drm_gem_object_unreference_unlocked(&obj->base);
}
__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)

@@ -2295,6 +2326,55 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}
static inline unsigned long
i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
{
return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
}
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_get_active(obj);
}
static inline void
i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
{
obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
}
static inline void
i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
{
obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
}
static inline bool
i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
int engine)
{
return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
}
static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
return obj->tiling_and_stride & TILING_MASK;
}
static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}
static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
return obj->tiling_and_stride & STRIDE_MASK;
}
/*
* Optimised SGL iterator for GEM objects
*/

@@ -2365,171 +2445,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
(((__iter).curr += PAGE_SIZE) < (__iter).max) || \
((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
/**
* Request queue structure.
*
* The request queue allows us to note sequence numbers that have been emitted
* and may be associated with active buffers to be retired.
*
* By keeping this list, we can avoid having to do questionable sequence
* number comparisons on buffer last_read|write_seqno. It also allows an
* emission time to be associated with the request for tracking how far ahead
* of the GPU the submission is.
*
* The requests are reference counted, so upon creation they should have an
* initial reference taken using kref_init
*/
struct drm_i915_gem_request {
struct kref ref;
/** On Which ring this request was generated */
struct drm_i915_private *i915;
struct intel_engine_cs *engine;
struct intel_signal_node signaling;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
* this request.
*/
u32 previous_seqno;
/** GEM sequence number associated with this request,
* when the HWS breadcrumb is equal or greater than this the GPU
* has finished processing this request.
*/
u32 seqno;
/** Position in the ringbuffer of the start of the request */
u32 head;
/**
* Position in the ringbuffer of the start of the postfix.
* This is required to calculate the maximum available ringbuffer
* space without overwriting the postfix.
*/
u32 postfix;
/** Position in the ringbuffer of the end of the whole request */
u32 tail;
/** Preallocate space in the ringbuffer for the emitting the request */
u32 reserved_space;
/**
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
* context, we must increment the context's refcount, to guarantee that
* it persists while any request is linked to it. Requests themselves
* are also refcounted, so the request will only be freed when the last
* reference to it is dismissed, and the code in
* i915_gem_request_free() will then decrement the refcount on the
* context.
*/
struct i915_gem_context *ctx;
struct intel_ringbuffer *ringbuf;
/**
* Context related to the previous request.
* As the contexts are accessed by the hardware until the switch is
* completed to a new context, the hardware may still be writing
* to the context object after the breadcrumb is visible. We must
* not unpin/unbind/prune that object whilst still active and so
* we keep the previous context pinned until the following (this)
* request is retired.
*/
struct i915_gem_context *previous_context;
/** Batch buffer related to this request if any (used for
error state dump only) */
struct drm_i915_gem_object *batch_obj;
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
/** global list entry for this request */
struct list_head list;
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
/** process identifier submitting this request */
struct pid *pid;
/**
* The ELSP only accepts two elements at a time, so we queue
* context/tail pairs on a given queue (ring->execlist_queue) until the
* hardware is available. The queue serves a double purpose: we also use
* it to keep track of the up to 2 contexts currently in the hardware
* (usually one in execution and the other queued up by the GPU): We
* only remove elements from the head of the queue when the hardware
* informs us that an element has been completed.
*
* All accesses to the queue are mediated by a spinlock
* (ring->execlist_lock).
*/
/** Execlist link in the submission queue.*/
struct list_head execlist_link;
/** Execlists no. of times this request has been sent to the ELSP */
int elsp_submitted;
/** Execlists context hardware id. */
unsigned ctx_hw_id;
};
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
return req ? req->seqno : 0;
}
static inline struct intel_engine_cs *
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
return req ? req->engine : NULL;
}
static inline struct drm_i915_gem_request *
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
if (req)
kref_get(&req->ref);
return req;
}
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
kref_put(&req->ref, i915_gem_request_free);
}
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src)
{
if (src)
i915_gem_request_reference(src);
if (*pdst)
i915_gem_request_unreference(*pdst);
*pdst = src;
}
/*
* XXX: i915_gem_request_completed should be here but currently needs the
* definition of i915_seqno_passed() which is below. It will be moved in
* a later patch when the call to i915_seqno_passed() is obsoleted...
*/
/*
* A command that requires special handling by the command parser.
*/

@@ -2617,8 +2532,9 @@ struct drm_i915_cmd_descriptor {
/*
* A table of commands requiring special handling by the command parser.
*
* Each ring has an array of tables. Each table consists of an array of command
* descriptors, which must be sorted with command opcodes in ascending order.
* Each engine has an array of tables. Each table consists of an array of
* command descriptors, which must be sorted with command opcodes in
* ascending order.
*/
struct drm_i915_cmd_table {
const struct drm_i915_cmd_descriptor *table;

@@ -2932,6 +2848,8 @@ extern int i915_resume_switcheroo(struct drm_device *dev);
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt);
bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,

@@ -3107,11 +3025,6 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req);
int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,

@@ -3150,40 +3063,24 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);
/* Flags used by pin/bind&friends. */
#define PIN_MAPPABLE (1<<0)
#define PIN_NONBLOCK (1<<1)
#define PIN_GLOBAL (1<<2)
#define PIN_OFFSET_BIAS (1<<3)
#define PIN_USER (1<<4)
#define PIN_UPDATE (1<<5)
#define PIN_ZONE_4G (1<<6)
#define PIN_HIGH (1<<7)
#define PIN_OFFSET_FIXED (1<<8)
#define PIN_OFFSET_MASK (~4095)
int __must_check
i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
uint64_t flags);
int __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
uint32_t alignment,
uint64_t flags);
u64 size,
u64 alignment,
u64 flags);
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
/*
* BEWARE: Do not use the function below unless you can _absolutely_
* _guarantee_ VMA in question is _not in use_ anywhere.
*/
int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);

@@ -3285,10 +3182,10 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
|
||||
struct intel_engine_cs *to,
|
||||
struct drm_i915_gem_request **to_req);
|
||||
struct drm_i915_gem_request *to);
|
||||
void i915_vma_move_to_active(struct i915_vma *vma,
|
||||
struct drm_i915_gem_request *req);
|
||||
struct drm_i915_gem_request *req,
|
||||
unsigned int flags);
|
||||
int i915_gem_dumb_create(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
|
@@ -3299,44 +3196,12 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);

/**
* Returns true if seq1 is later than seq2.
*/
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
return (int32_t)(seq1 - seq2) >= 0;
}

static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
{
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->previous_seqno);
}

static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->seqno);
}

bool __i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us)
{
return (i915_gem_request_started(request) &&
__i915_spin_request(request, state, timeout_us));
}
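Both helpers above reduce to i915_seqno_passed(), whose signed subtraction is what makes the comparison safe across the 32-bit seqno wrapping around, provided the two values are less than 2^31 apart. A small self-contained check of that property (plain userspace C, not driver code):

#include <assert.h>
#include <stdint.h>

/* Same comparison as i915_seqno_passed(): seq1 is "later" than seq2 when
 * their difference, interpreted as a signed 32-bit value, is non-negative.
 */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        assert(seqno_passed(100, 99));                /* ordinary ordering */
        assert(!seqno_passed(99, 100));
        assert(seqno_passed(0x00000003, 0xfffffffd)); /* ordering across the wrap */
        assert(!seqno_passed(0xfffffffd, 0x00000003));
        return 0;
}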
int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);

void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);

static inline u32 i915_reset_counter(struct i915_gpu_error *error)
{
@ -3381,24 +3246,13 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
|
|||
void i915_gem_reset(struct drm_device *dev);
|
||||
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
|
||||
int __must_check i915_gem_init(struct drm_device *dev);
|
||||
int i915_gem_init_engines(struct drm_device *dev);
|
||||
int __must_check i915_gem_init_hw(struct drm_device *dev);
|
||||
void i915_gem_init_swizzling(struct drm_device *dev);
|
||||
void i915_gem_cleanup_engines(struct drm_device *dev);
|
||||
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
|
||||
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
|
||||
bool interruptible);
|
||||
int __must_check i915_gem_suspend(struct drm_device *dev);
|
||||
void __i915_add_request(struct drm_i915_gem_request *req,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
bool flush_caches);
|
||||
#define i915_add_request(req) \
|
||||
__i915_add_request(req, NULL, true)
|
||||
#define i915_add_request_no_flush(req) \
|
||||
__i915_add_request(req, NULL, false)
|
||||
int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
bool interruptible,
|
||||
s64 *timeout,
|
||||
struct intel_rps_client *rps);
|
||||
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
|
||||
void i915_gem_resume(struct drm_device *dev);
|
||||
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
|
||||
int __must_check
|
||||
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
|
||||
|
@ -3419,11 +3273,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
|
|||
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
|
||||
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
|
||||
|
||||
uint32_t
|
||||
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
|
||||
uint32_t
|
||||
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
|
||||
int tiling_mode, bool fenced);
|
||||
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
|
||||
int tiling_mode);
|
||||
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
|
||||
int tiling_mode, bool fenced);
|
||||
|
||||
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
|
||||
enum i915_cache_level cache_level);
|
||||
|
@ -3444,7 +3297,6 @@ i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
|
|||
return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
|
||||
}
|
||||
|
||||
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
|
||||
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
|
||||
const struct i915_ggtt_view *view);
|
||||
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
|
||||
|
@ -3478,7 +3330,6 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
|
|||
return container_of(vm, struct i915_hw_ppgtt, base);
|
||||
}
|
||||
|
||||
|
||||
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
|
||||
|
@ -3487,18 +3338,6 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
|
|||
unsigned long
|
||||
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
|
||||
|
||||
static inline int __must_check
|
||||
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
|
||||
uint32_t alignment,
|
||||
unsigned flags)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
|
||||
return i915_gem_object_pin(obj, &ggtt->base,
|
||||
alignment, flags | PIN_GLOBAL);
|
||||
}
|
||||
|
||||
void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
|
||||
const struct i915_ggtt_view *view);
|
||||
static inline void
|
||||
|
@ -3528,6 +3367,7 @@ void i915_gem_context_reset(struct drm_device *dev);
|
|||
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
|
||||
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
|
||||
int i915_switch_context(struct drm_i915_gem_request *req);
|
||||
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_context_free(struct kref *ctx_ref);
|
||||
struct drm_i915_gem_object *
|
||||
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
|
||||
|
@ -3548,12 +3388,14 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
|
|||
return ctx;
|
||||
}
|
||||
|
||||
static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
|
||||
static inline struct i915_gem_context *
|
||||
i915_gem_context_get(struct i915_gem_context *ctx)
|
||||
{
|
||||
kref_get(&ctx->ref);
|
||||
return ctx;
|
||||
}
|
||||
|
||||
static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
|
||||
static inline void i915_gem_context_put(struct i915_gem_context *ctx)
|
||||
{
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
kref_put(&ctx->ref, i915_gem_context_free);
|
||||
|
@ -3576,13 +3418,10 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
|
|||
struct drm_file *file);
|
||||
|
||||
/* i915_gem_evict.c */
|
||||
int __must_check i915_gem_evict_something(struct drm_device *dev,
|
||||
struct i915_address_space *vm,
|
||||
int min_size,
|
||||
unsigned alignment,
|
||||
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
|
||||
u64 min_size, u64 alignment,
|
||||
unsigned cache_level,
|
||||
unsigned long start,
|
||||
unsigned long end,
|
||||
u64 start, u64 end,
|
||||
unsigned flags);
|
||||
int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
|
||||
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
|
||||
|
@ -3634,16 +3473,9 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
|
|||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||
|
||||
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
|
||||
obj->tiling_mode != I915_TILING_NONE;
|
||||
i915_gem_object_is_tiled(obj);
|
||||
}
|
||||
|
||||
/* i915_gem_debug.c */
|
||||
#if WATCH_LISTS
|
||||
int i915_verify_lists(struct drm_device *dev);
|
||||
#else
|
||||
#define i915_verify_lists(dev) 0
|
||||
#endif
|
||||
|
||||
/* i915_debugfs.c */
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
int i915_debugfs_register(struct drm_i915_private *dev_priv);
|
||||
|
@ -3684,15 +3516,15 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
|
|||
|
||||
/* i915_cmd_parser.c */
|
||||
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
|
||||
int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
|
||||
void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
|
||||
bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
|
||||
int i915_parse_cmds(struct intel_engine_cs *engine,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
struct drm_i915_gem_object *shadow_batch_obj,
|
||||
u32 batch_start_offset,
|
||||
u32 batch_len,
|
||||
bool is_master);
|
||||
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
|
||||
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
|
||||
bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
|
||||
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
struct drm_i915_gem_object *shadow_batch_obj,
|
||||
u32 batch_start_offset,
|
||||
u32 batch_len,
|
||||
bool is_master);
|
||||
|
||||
/* i915_suspend.c */
|
||||
extern int i915_save_state(struct drm_device *dev);
|
||||
|
@ -3800,7 +3632,6 @@ extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
|
|||
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
|
||||
bool enable);
|
||||
|
||||
extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
|
||||
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file);
|
||||
|
||||
|
|
File diff suppressed because it is too large
@ -41,15 +41,15 @@
|
|||
|
||||
/**
|
||||
* i915_gem_batch_pool_init() - initialize a batch buffer pool
|
||||
* @dev: the drm device
|
||||
* @engine: the associated request submission engine
|
||||
* @pool: the batch buffer pool
|
||||
*/
|
||||
void i915_gem_batch_pool_init(struct drm_device *dev,
|
||||
void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
|
||||
struct i915_gem_batch_pool *pool)
|
||||
{
|
||||
int n;
|
||||
|
||||
pool->dev = dev;
|
||||
pool->engine = engine;
|
||||
|
||||
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
|
||||
INIT_LIST_HEAD(&pool->cache_list[n]);
|
||||
|
@ -65,18 +65,17 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
|
|||
{
|
||||
int n;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
|
||||
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
|
||||
|
||||
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
|
||||
while (!list_empty(&pool->cache_list[n])) {
|
||||
struct drm_i915_gem_object *obj =
|
||||
list_first_entry(&pool->cache_list[n],
|
||||
struct drm_i915_gem_object,
|
||||
batch_pool_link);
|
||||
struct drm_i915_gem_object *obj, *next;
|
||||
|
||||
list_del(&obj->batch_pool_link);
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
}
|
||||
list_for_each_entry_safe(obj, next,
|
||||
&pool->cache_list[n],
|
||||
batch_pool_link)
|
||||
i915_gem_object_put(obj);
|
||||
|
||||
INIT_LIST_HEAD(&pool->cache_list[n]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -102,7 +101,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
struct list_head *list;
int n;

WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

/* Compute a power-of-two bucket, but throw everything greater than
* 16KiB into the same bucket: i.e. the the buckets hold objects of
@@ -115,13 +114,14 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,

list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
if (tmp->active)
if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
&tmp->base.dev->struct_mutex))
break;

/* While we're looping, do some clean up */
if (tmp->madv == __I915_MADV_PURGED) {
list_del(&tmp->batch_pool_link);
drm_gem_object_unreference(&tmp->base);
i915_gem_object_put(tmp);
continue;
}

@@ -134,7 +134,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (obj == NULL) {
int ret;

obj = i915_gem_object_create(pool->dev, size);
obj = i915_gem_object_create(&pool->engine->i915->drm, size);
if (IS_ERR(obj))
return obj;
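The bucket selection that the clipped comment above describes amounts to taking log2 of the object's page count and clamping it to the last of the four cache lists, so that everything larger than 16KiB shares a single bucket. A standalone sketch of that mapping, assuming 4KiB pages and four buckets; this is illustrative only, not the driver's exact lines:

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4KiB pages assumed for the example */
#define NUM_BUCKETS 4   /* mirrors cache_list[4] in the pool */

/* Map an object size to one of four power-of-two buckets:
 * 1 page, 2 pages, 4 pages, and "anything bigger than 16KiB".
 */
static unsigned int size_to_bucket(size_t size)
{
        size_t pages = size >> PAGE_SHIFT;
        unsigned int n = 0;

        while (pages > 1 && n < NUM_BUCKETS - 1) {
                pages >>= 1;
                n++;
        }
        return n;
}

int main(void)
{
        printf("%u %u %u %u\n",
               size_to_bucket(4096),     /* bucket 0 */
               size_to_bucket(8192),     /* bucket 1 */
               size_to_bucket(16384),    /* bucket 2 */
               size_to_bucket(1 << 20)); /* bucket 3, the catch-all */
        return 0;
}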
|
||||
|
||||
|
|
|
@ -27,13 +27,15 @@
|
|||
|
||||
#include "i915_drv.h"
|
||||
|
||||
struct intel_engine_cs;
|
||||
|
||||
struct i915_gem_batch_pool {
|
||||
struct drm_device *dev;
|
||||
struct intel_engine_cs *engine;
|
||||
struct list_head cache_list[4];
|
||||
};
|
||||
|
||||
/* i915_gem_batch_pool.c */
|
||||
void i915_gem_batch_pool_init(struct drm_device *dev,
|
||||
void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
|
||||
struct i915_gem_batch_pool *pool);
|
||||
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
|
||||
struct drm_i915_gem_object*
|
||||
|
|
|
@ -134,21 +134,6 @@ static int get_context_size(struct drm_i915_private *dev_priv)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void i915_gem_context_clean(struct i915_gem_context *ctx)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
|
||||
struct i915_vma *vma, *next;
|
||||
|
||||
if (!ppgtt)
|
||||
return;
|
||||
|
||||
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
|
||||
vm_link) {
|
||||
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_context_free(struct kref *ctx_ref)
|
||||
{
|
||||
struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
|
||||
|
@ -156,13 +141,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
|
|||
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
trace_i915_context_free(ctx);
|
||||
|
||||
/*
|
||||
* This context is going away and we need to remove all VMAs still
|
||||
* around. This is to handle imported shared objects for which
|
||||
* destructor did not run when their handles were closed.
|
||||
*/
|
||||
i915_gem_context_clean(ctx);
|
||||
GEM_BUG_ON(!ctx->closed);
|
||||
|
||||
i915_ppgtt_put(ctx->ppgtt);
|
||||
|
||||
|
@ -173,10 +152,10 @@ void i915_gem_context_free(struct kref *ctx_ref)
|
|||
continue;
|
||||
|
||||
WARN_ON(ce->pin_count);
|
||||
if (ce->ringbuf)
|
||||
intel_ringbuffer_free(ce->ringbuf);
|
||||
if (ce->ring)
|
||||
intel_ring_free(ce->ring);
|
||||
|
||||
drm_gem_object_unreference(&ce->state->base);
|
||||
i915_gem_object_put(ce->state);
|
||||
}
|
||||
|
||||
list_del(&ctx->link);
|
||||
|
@ -216,7 +195,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
|
|||
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
|
||||
/* Failure shouldn't ever happen this early */
|
||||
if (WARN_ON(ret)) {
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
}
|
||||
|
@ -224,6 +203,37 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
|
|||
return obj;
|
||||
}
|
||||
|
||||
static void i915_ppgtt_close(struct i915_address_space *vm)
|
||||
{
|
||||
struct list_head *phases[] = {
|
||||
&vm->active_list,
|
||||
&vm->inactive_list,
|
||||
&vm->unbound_list,
|
||||
NULL,
|
||||
}, **phase;
|
||||
|
||||
GEM_BUG_ON(vm->closed);
|
||||
vm->closed = true;
|
||||
|
||||
for (phase = phases; *phase; phase++) {
|
||||
struct i915_vma *vma, *vn;
|
||||
|
||||
list_for_each_entry_safe(vma, vn, *phase, vm_link)
|
||||
if (!i915_vma_is_closed(vma))
|
||||
i915_vma_close(vma);
|
||||
}
|
||||
}
|
||||
|
||||
static void context_close(struct i915_gem_context *ctx)
|
||||
{
|
||||
GEM_BUG_ON(ctx->closed);
|
||||
ctx->closed = true;
|
||||
if (ctx->ppgtt)
|
||||
i915_ppgtt_close(&ctx->ppgtt->base);
|
||||
ctx->file_priv = ERR_PTR(-EBADF);
|
||||
i915_gem_context_put(ctx);
|
||||
}
|
||||
|
||||
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
|
||||
{
|
||||
int ret;
|
||||
|
@ -305,7 +315,7 @@ __create_hw_context(struct drm_device *dev,
|
|||
return ctx;
|
||||
|
||||
err_out:
|
||||
i915_gem_context_unreference(ctx);
|
||||
context_close(ctx);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
|
@ -327,13 +337,14 @@ i915_gem_create_context(struct drm_device *dev,
|
|||
return ctx;
|
||||
|
||||
if (USES_FULL_PPGTT(dev)) {
|
||||
struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
|
||||
struct i915_hw_ppgtt *ppgtt =
|
||||
i915_ppgtt_create(to_i915(dev), file_priv);
|
||||
|
||||
if (IS_ERR(ppgtt)) {
|
||||
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
|
||||
PTR_ERR(ppgtt));
|
||||
idr_remove(&file_priv->context_idr, ctx->user_handle);
|
||||
i915_gem_context_unreference(ctx);
|
||||
context_close(ctx);
|
||||
return ERR_CAST(ppgtt);
|
||||
}
|
||||
|
||||
|
@ -390,7 +401,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
|
|||
if (ce->state)
|
||||
i915_gem_object_ggtt_unpin(ce->state);
|
||||
|
||||
i915_gem_context_unreference(ctx);
|
||||
i915_gem_context_put(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -504,7 +515,7 @@ void i915_gem_context_fini(struct drm_device *dev)
|
|||
|
||||
lockdep_assert_held(&dev->struct_mutex);
|
||||
|
||||
i915_gem_context_unreference(dctx);
|
||||
context_close(dctx);
|
||||
dev_priv->kernel_context = NULL;
|
||||
|
||||
ida_destroy(&dev_priv->context_hw_ida);
|
||||
|
@ -514,8 +525,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
|
|||
{
|
||||
struct i915_gem_context *ctx = p;
|
||||
|
||||
ctx->file_priv = ERR_PTR(-EBADF);
|
||||
i915_gem_context_unreference(ctx);
|
||||
context_close(ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -552,11 +562,12 @@ static inline int
|
|||
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = req->i915;
|
||||
struct intel_ring *ring = req->ring;
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
u32 flags = hw_flags | MI_MM_SPACE_GTT;
|
||||
const int num_rings =
|
||||
/* Use an extended w/a on ivb+ if signalling from other rings */
|
||||
i915_semaphore_is_enabled(dev_priv) ?
|
||||
i915.semaphores ?
|
||||
hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
|
||||
0;
|
||||
int len, ret;
|
||||
|
@ -567,7 +578,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
|
|||
* itlb_before_ctx_switch.
|
||||
*/
|
||||
if (IS_GEN6(dev_priv)) {
|
||||
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
|
||||
ret = engine->emit_flush(req, EMIT_INVALIDATE);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -589,64 +600,64 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
|
|||
|
||||
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
|
||||
if (INTEL_GEN(dev_priv) >= 7) {
|
||||
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
|
||||
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
|
||||
if (num_rings) {
|
||||
struct intel_engine_cs *signaller;
|
||||
|
||||
intel_ring_emit(engine,
|
||||
intel_ring_emit(ring,
|
||||
MI_LOAD_REGISTER_IMM(num_rings));
|
||||
for_each_engine(signaller, dev_priv) {
|
||||
if (signaller == engine)
|
||||
continue;
|
||||
|
||||
intel_ring_emit_reg(engine,
|
||||
intel_ring_emit_reg(ring,
|
||||
RING_PSMI_CTL(signaller->mmio_base));
|
||||
intel_ring_emit(engine,
|
||||
intel_ring_emit(ring,
|
||||
_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit(engine, MI_SET_CONTEXT);
|
||||
intel_ring_emit(engine,
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_emit(ring, MI_SET_CONTEXT);
|
||||
intel_ring_emit(ring,
|
||||
i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
|
||||
flags);
|
||||
/*
|
||||
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
|
||||
* WaMiSetContext_Hang:snb,ivb,vlv
|
||||
*/
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 7) {
|
||||
if (num_rings) {
|
||||
struct intel_engine_cs *signaller;
|
||||
i915_reg_t last_reg = {}; /* keep gcc quiet */
|
||||
|
||||
intel_ring_emit(engine,
|
||||
intel_ring_emit(ring,
|
||||
MI_LOAD_REGISTER_IMM(num_rings));
|
||||
for_each_engine(signaller, dev_priv) {
|
||||
if (signaller == engine)
|
||||
continue;
|
||||
|
||||
last_reg = RING_PSMI_CTL(signaller->mmio_base);
|
||||
intel_ring_emit_reg(engine, last_reg);
|
||||
intel_ring_emit(engine,
|
||||
intel_ring_emit_reg(ring, last_reg);
|
||||
intel_ring_emit(ring,
|
||||
_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
|
||||
}
|
||||
|
||||
/* Insert a delay before the next switch! */
|
||||
intel_ring_emit(engine,
|
||||
intel_ring_emit(ring,
|
||||
MI_STORE_REGISTER_MEM |
|
||||
MI_SRM_LRM_GLOBAL_GTT);
|
||||
intel_ring_emit_reg(engine, last_reg);
|
||||
intel_ring_emit(engine, engine->scratch.gtt_offset);
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit_reg(ring, last_reg);
|
||||
intel_ring_emit(ring, engine->scratch.gtt_offset);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
}
|
||||
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
|
||||
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
|
||||
}
|
||||
|
||||
intel_ring_advance(engine);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -654,7 +665,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
|
|||
static int remap_l3(struct drm_i915_gem_request *req, int slice)
|
||||
{
|
||||
u32 *remap_info = req->i915->l3_parity.remap_info[slice];
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_ring *ring = req->ring;
|
||||
int i, ret;
|
||||
|
||||
if (!remap_info)
|
||||
|
@ -669,13 +680,13 @@ static int remap_l3(struct drm_i915_gem_request *req, int slice)
|
|||
* here because no other code should access these registers other than
|
||||
* at initialization time.
|
||||
*/
|
||||
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
|
||||
for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
|
||||
intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
|
||||
intel_ring_emit(engine, remap_info[i]);
|
||||
intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
|
||||
intel_ring_emit(ring, remap_info[i]);
|
||||
}
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_advance(engine);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -752,9 +763,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
|
|||
return 0;
|
||||
|
||||
/* Trying to pin first makes error handling easier. */
|
||||
ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
|
||||
to->ggtt_alignment,
|
||||
0);
|
||||
ret = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0,
|
||||
to->ggtt_alignment, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -814,8 +824,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
|
|||
* MI_SET_CONTEXT instead of when the next seqno has completed.
|
||||
*/
|
||||
if (from != NULL) {
|
||||
from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
|
||||
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
|
||||
struct drm_i915_gem_object *obj = from->engine[RCS].state;
|
||||
|
||||
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
|
||||
* whole damn pipeline, we don't need to explicitly mark the
|
||||
* object dirty. The only exception is that the context must be
|
||||
|
@ -823,14 +833,14 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
|
|||
* able to defer doing this until we know the object would be
|
||||
* swapped, but there is no way to do that yet.
|
||||
*/
|
||||
from->engine[RCS].state->dirty = 1;
|
||||
obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
|
||||
i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
|
||||
|
||||
/* obj is kept alive until the next request by its active ref */
|
||||
i915_gem_object_ggtt_unpin(from->engine[RCS].state);
|
||||
i915_gem_context_unreference(from);
|
||||
i915_gem_object_ggtt_unpin(obj);
|
||||
i915_gem_context_put(from);
|
||||
}
|
||||
i915_gem_context_reference(to);
|
||||
engine->last_context = to;
|
||||
engine->last_context = i915_gem_context_get(to);
|
||||
|
||||
/* GEN8 does *not* require an explicit reload if the PDPs have been
|
||||
* setup, and we do not wish to move them.
|
||||
|
@ -894,8 +904,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
|
|||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
|
||||
WARN_ON(i915.enable_execlists);
|
||||
lockdep_assert_held(&req->i915->drm.struct_mutex);
|
||||
if (i915.enable_execlists)
|
||||
return 0;
|
||||
|
||||
if (!req->ctx->engine[engine->id].state) {
|
||||
struct i915_gem_context *to = req->ctx;
|
||||
|
@ -914,10 +925,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
|
|||
}
|
||||
|
||||
if (to != engine->last_context) {
|
||||
i915_gem_context_reference(to);
|
||||
if (engine->last_context)
|
||||
i915_gem_context_unreference(engine->last_context);
|
||||
engine->last_context = to;
|
||||
i915_gem_context_put(engine->last_context);
|
||||
engine->last_context = i915_gem_context_get(to);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@@ -926,6 +936,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
return do_rcs_switch(req);
}

int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;

for_each_engine(engine, dev_priv) {
struct drm_i915_gem_request *req;
int ret;

if (engine->last_context == NULL)
continue;

if (engine->last_context == dev_priv->kernel_context)
continue;

req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(req))
return PTR_ERR(req);

ret = i915_switch_context(req);
i915_add_request_no_flush(req);
if (ret)
return ret;
}

return 0;
}
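Switching every engine back to the always-pinned kernel context is how callers such as eviction and suspend drop the engines' last references to user contexts before idling. A condensed sketch of that calling pattern, modelled on the eviction code later in this patch; the wrapper name is invented:

/* Hypothetical helper: park the GPU on the kernel context and retire
 * everything, so user contexts lose their last active references.
 */
static int hypothetical_park_gpu(struct drm_i915_private *dev_priv)
{
        int ret;

        ret = i915_gem_switch_to_kernel_context(dev_priv);
        if (ret)
                return ret;

        /* Wait (interruptibly) for the switch requests and all prior work */
        ret = i915_gem_wait_for_idle(dev_priv, true);
        if (ret)
                return ret;

        /* Retiring now releases the engines' references to user contexts */
        i915_gem_retire_requests(dev_priv);
        return 0;
}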
static bool contexts_enabled(struct drm_device *dev)
{
return i915.enable_execlists || to_i915(dev)->hw_context_size;
@ -985,7 +1022,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
|
|||
}
|
||||
|
||||
idr_remove(&file_priv->context_idr, ctx->user_handle);
|
||||
i915_gem_context_unreference(ctx);
|
||||
context_close(ctx);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
|
||||
|
|
|
@ -1,70 +0,0 @@
|
|||
/*
|
||||
* Copyright © 2008 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Keith Packard <keithp@keithp.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/i915_drm.h>
|
||||
#include "i915_drv.h"
|
||||
|
||||
#if WATCH_LISTS
|
||||
int
|
||||
i915_verify_lists(struct drm_device *dev)
|
||||
{
|
||||
static int warned;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct intel_engine_cs *engine;
|
||||
int err = 0;
|
||||
|
||||
if (warned)
|
||||
return 0;
|
||||
|
||||
for_each_engine(engine, dev_priv) {
|
||||
list_for_each_entry(obj, &engine->active_list,
|
||||
engine_list[engine->id]) {
|
||||
if (obj->base.dev != dev ||
|
||||
!atomic_read(&obj->base.refcount.refcount)) {
|
||||
DRM_ERROR("%s: freed active obj %p\n",
|
||||
engine->name, obj);
|
||||
err++;
|
||||
break;
|
||||
} else if (!obj->active ||
|
||||
obj->last_read_req[engine->id] == NULL) {
|
||||
DRM_ERROR("%s: invalid active obj %p\n",
|
||||
engine->name, obj);
|
||||
err++;
|
||||
} else if (obj->base.write_domain) {
|
||||
DRM_ERROR("%s: invalid write obj %p (w %x)\n",
|
||||
engine->name,
|
||||
obj, obj->base.write_domain);
|
||||
err++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return warned = err;
|
||||
}
|
||||
#endif /* WATCH_LIST */
|
|
@ -23,9 +23,13 @@
|
|||
* Authors:
|
||||
* Dave Airlie <airlied@redhat.com>
|
||||
*/
|
||||
#include <drm/drmP.h>
|
||||
#include "i915_drv.h"
|
||||
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/reservation.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
|
||||
{
|
||||
|
@@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};

static void export_fences(struct drm_i915_gem_object *obj,
struct dma_buf *dma_buf)
{
struct reservation_object *resv = dma_buf->resv;
struct drm_i915_gem_request *req;
unsigned long active;
int idx;

active = __I915_BO_ACTIVE(obj);
if (!active)
return;

/* Serialise with execbuf to prevent concurrent fence-loops */
mutex_lock(&obj->base.dev->struct_mutex);

/* Mark the object for future fences before racily adding old fences */
obj->base.dma_buf = dma_buf;

ww_mutex_lock(&resv->lock, NULL);

for_each_active(active, idx) {
req = i915_gem_active_get(&obj->last_read[idx],
&obj->base.dev->struct_mutex);
if (!req)
continue;

if (reservation_object_reserve_shared(resv) == 0)
reservation_object_add_shared_fence(resv, &req->fence);

i915_gem_request_put(req);
}

req = i915_gem_active_get(&obj->last_write,
&obj->base.dev->struct_mutex);
if (req) {
reservation_object_add_excl_fence(resv, &req->fence);
i915_gem_request_put(req);
}

ww_mutex_unlock(&resv->lock);
mutex_unlock(&obj->base.dev->struct_mutex);
}
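With the request fences published on the dma-buf's reservation object, an importing driver can synchronise through the generic reservation API rather than anything i915-specific. A hedged sketch of an importer-side wait; the function name is invented, while reservation_object_wait_timeout_rcu() is the standard helper of this kernel era:

#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/reservation.h>

/* Illustrative only: block for up to a second until every fence exported
 * on the dma-buf (exclusive writer and shared readers) has signalled.
 */
static int hypothetical_wait_for_dmabuf_idle(struct dma_buf *dma_buf)
{
        long ret;

        ret = reservation_object_wait_timeout_rcu(dma_buf->resv,
                                                  true, /* wait_all: readers too */
                                                  true, /* interruptible */
                                                  msecs_to_jiffies(1000));
        if (ret < 0)
                return ret;             /* e.g. -ERESTARTSYS */

        return ret ? 0 : -ETIMEDOUT;    /* 0 means the timeout elapsed */
}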
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *dma_buf;

exp_info.ops = &i915_dmabuf_ops;
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;

if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
if (ret)
return ERR_PTR(ret);
}

return dma_buf_export(&exp_info);
dma_buf = dma_buf_export(&exp_info);
if (IS_ERR(dma_buf))
return dma_buf;

export_fences(obj, dma_buf);
return dma_buf;
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
|
||||
|
@ -278,8 +330,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
|
|||
* Importing dmabuf exported from out own gem increases
|
||||
* refcount on gem itself instead of f_count of dmabuf.
|
||||
*/
|
||||
drm_gem_object_reference(&obj->base);
|
||||
return &obj->base;
|
||||
return &i915_gem_object_get(obj)->base;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -300,6 +351,16 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
|
|||
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
|
||||
obj->base.import_attach = attach;
|
||||
|
||||
/* We use GTT as shorthand for a coherent domain, one that is
|
||||
* neither in the GPU cache nor in the CPU cache, where all
|
||||
* writes are immediately visible in memory. (That's not strictly
|
||||
* true, but it's close! There are internal buffers such as the
|
||||
* write-combined buffer or a delay through the chipset for GTT
|
||||
* writes that do require us to treat GTT as a separate cache domain.)
|
||||
*/
|
||||
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
|
||||
obj->base.write_domain = 0;
|
||||
|
||||
return &obj->base;
|
||||
|
||||
fail_detach:
|
||||
|
|
|
@ -33,41 +33,23 @@
|
|||
#include "intel_drv.h"
|
||||
#include "i915_trace.h"
|
||||
|
||||
static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
|
||||
static bool
|
||||
gpu_is_idle(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
if (i915.enable_execlists)
|
||||
return 0;
|
||||
|
||||
for_each_engine(engine, dev_priv) {
|
||||
struct drm_i915_gem_request *req;
|
||||
int ret;
|
||||
|
||||
if (engine->last_context == NULL)
|
||||
continue;
|
||||
|
||||
if (engine->last_context == dev_priv->kernel_context)
|
||||
continue;
|
||||
|
||||
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
|
||||
if (IS_ERR(req))
|
||||
return PTR_ERR(req);
|
||||
|
||||
ret = i915_switch_context(req);
|
||||
i915_add_request_no_flush(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (intel_engine_is_active(engine))
|
||||
return false;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
static bool
|
||||
mark_free(struct i915_vma *vma, struct list_head *unwind)
|
||||
{
|
||||
if (vma->pin_count)
|
||||
if (i915_vma_is_pinned(vma))
|
||||
return false;
|
||||
|
||||
if (WARN_ON(!list_empty(&vma->exec_list)))
|
||||
|
@ -79,7 +61,6 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
|
|||
|
||||
/**
|
||||
* i915_gem_evict_something - Evict vmas to make room for binding a new one
|
||||
* @dev: drm_device
|
||||
* @vm: address space to evict from
|
||||
* @min_size: size of the desired free space
|
||||
* @alignment: alignment constraint of the desired free space
|
||||
|
@@ -102,42 +83,37 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
* memory in e.g. the shrinker.
*/
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
int min_size, unsigned alignment, unsigned cache_level,
unsigned long start, unsigned long end,
i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
unsigned cache_level,
u64 start, u64 end,
unsigned flags)
{
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
int ret = 0;
int pass = 0;
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct list_head eviction_list;
struct list_head *phases[] = {
&vm->inactive_list,
&vm->active_list,
NULL,
}, **phase;
struct i915_vma *vma, *next;
int ret;

trace_i915_gem_evict(dev, min_size, alignment, flags);
trace_i915_gem_evict(vm, min_size, alignment, flags);

/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
* retirement order. The next objects to retire are those on the (per
* ring) active list that do not have an outstanding flush. Once the
* hardware reports completion (the seqno is updated after the
* batchbuffer has been finished) the clean buffer objects would
* be retired to the inactive list. Any dirty objects would be added
* to the tail of the flushing list. So after processing the clean
* active objects we need to emit a MI_FLUSH to retire the flushing
* list, hence the retirement order of the flushing list is in
* advance of the dirty objects on the active lists.
* retirement order. The next objects to retire are those in flight,
* on the active list, again in retirement order.
*
* The retirement sequence is thus:
* 1. Inactive objects (already retired)
* 2. Clean active objects
* 3. Flushing list
* 4. Dirty active objects.
* 2. Active objects (will stall on unbinding)
*
* On each list, the oldest objects lie at the HEAD with the freshest
* object on the TAIL.
*/
|
||||
|
||||
INIT_LIST_HEAD(&unwind_list);
|
||||
if (start != 0 || end != vm->total) {
|
||||
drm_mm_init_scan_with_range(&vm->mm, min_size,
|
||||
alignment, cache_level,
|
||||
|
@ -145,96 +121,84 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
|
|||
} else
|
||||
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
|
||||
|
||||
search_again:
|
||||
/* First see if there is a large enough contiguous idle region... */
|
||||
list_for_each_entry(vma, &vm->inactive_list, vm_link) {
|
||||
if (mark_free(vma, &unwind_list))
|
||||
goto found;
|
||||
}
|
||||
|
||||
if (flags & PIN_NONBLOCK)
|
||||
goto none;
|
||||
phases[1] = NULL;
|
||||
|
||||
/* Now merge in the soon-to-be-expired objects... */
|
||||
list_for_each_entry(vma, &vm->active_list, vm_link) {
|
||||
if (mark_free(vma, &unwind_list))
|
||||
goto found;
|
||||
}
|
||||
search_again:
|
||||
INIT_LIST_HEAD(&eviction_list);
|
||||
phase = phases;
|
||||
do {
|
||||
list_for_each_entry(vma, *phase, vm_link)
|
||||
if (mark_free(vma, &eviction_list))
|
||||
goto found;
|
||||
} while (*++phase);
|
||||
|
||||
none:
|
||||
/* Nothing found, clean up and bail out! */
|
||||
while (!list_empty(&unwind_list)) {
|
||||
vma = list_first_entry(&unwind_list,
|
||||
struct i915_vma,
|
||||
exec_list);
|
||||
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
|
||||
ret = drm_mm_scan_remove_block(&vma->node);
|
||||
BUG_ON(ret);
|
||||
|
||||
list_del_init(&vma->exec_list);
|
||||
INIT_LIST_HEAD(&vma->exec_list);
|
||||
}
|
||||
|
||||
/* Can we unpin some objects such as idle hw contents,
|
||||
* or pending flips?
|
||||
* or pending flips? But since only the GGTT has global entries
|
||||
* such as scanouts, rinbuffers and contexts, we can skip the
|
||||
* purge when inspecting per-process local address spaces.
|
||||
*/
|
||||
if (flags & PIN_NONBLOCK)
|
||||
if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
|
||||
return -ENOSPC;
|
||||
|
||||
/* Only idle the GPU and repeat the search once */
|
||||
if (pass++ == 0) {
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
if (i915_is_ggtt(vm)) {
|
||||
ret = switch_to_pinned_context(dev_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = i915_gem_wait_for_idle(dev_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
goto search_again;
|
||||
if (gpu_is_idle(dev_priv)) {
|
||||
/* If we still have pending pageflip completions, drop
|
||||
* back to userspace to give our workqueues time to
|
||||
* acquire our locks and unpin the old scanouts.
|
||||
*/
|
||||
return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
|
||||
}
|
||||
|
||||
/* If we still have pending pageflip completions, drop
|
||||
* back to userspace to give our workqueues time to
|
||||
* acquire our locks and unpin the old scanouts.
|
||||
/* Not everything in the GGTT is tracked via vma (otherwise we
|
||||
* could evict as required with minimal stalling) so we are forced
|
||||
* to idle the GPU and explicitly retire outstanding requests in
|
||||
* the hopes that we can then remove contexts and the like only
|
||||
* bound by their active reference.
|
||||
*/
|
||||
return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
|
||||
ret = i915_gem_switch_to_kernel_context(dev_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = i915_gem_wait_for_idle(dev_priv, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
goto search_again;
|
||||
|
||||
found:
|
||||
/* drm_mm doesn't allow any other other operations while
|
||||
* scanning, therefore store to be evicted objects on a
|
||||
* temporary list. */
|
||||
INIT_LIST_HEAD(&eviction_list);
|
||||
while (!list_empty(&unwind_list)) {
|
||||
vma = list_first_entry(&unwind_list,
|
||||
struct i915_vma,
|
||||
exec_list);
|
||||
if (drm_mm_scan_remove_block(&vma->node)) {
|
||||
list_move(&vma->exec_list, &eviction_list);
|
||||
drm_gem_object_reference(&vma->obj->base);
|
||||
continue;
|
||||
}
|
||||
list_del_init(&vma->exec_list);
|
||||
* scanning, therefore store to-be-evicted objects on a
|
||||
* temporary list and take a reference for all before
|
||||
* calling unbind (which may remove the active reference
|
||||
* of any of our objects, thus corrupting the list).
|
||||
*/
|
||||
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
|
||||
if (drm_mm_scan_remove_block(&vma->node))
|
||||
__i915_vma_pin(vma);
|
||||
else
|
||||
list_del_init(&vma->exec_list);
|
||||
}
|
||||
|
||||
/* Unbinding will emit any required flushes */
|
||||
while (!list_empty(&eviction_list)) {
|
||||
struct drm_gem_object *obj;
|
||||
vma = list_first_entry(&eviction_list,
|
||||
struct i915_vma,
|
||||
exec_list);
|
||||
|
||||
obj = &vma->obj->base;
|
||||
list_del_init(&vma->exec_list);
|
||||
__i915_vma_unpin(vma);
|
||||
if (ret == 0)
|
||||
ret = i915_vma_unbind(vma);
|
||||
|
||||
drm_gem_object_unreference(obj);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -256,8 +220,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
|
|||
|
||||
vma = container_of(node, typeof(*vma), node);
|
||||
|
||||
if (vma->pin_count) {
|
||||
if (!vma->exec_entry || (vma->pin_count > 1))
|
||||
if (i915_vma_is_pinned(vma)) {
|
||||
if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
|
||||
/* Object is pinned for some other use */
|
||||
return -EBUSY;
|
||||
|
||||
|
@ -303,22 +267,21 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
|
|||
struct drm_i915_private *dev_priv = to_i915(vm->dev);
|
||||
|
||||
if (i915_is_ggtt(vm)) {
|
||||
ret = switch_to_pinned_context(dev_priv);
|
||||
ret = i915_gem_switch_to_kernel_context(dev_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = i915_gem_wait_for_idle(dev_priv);
|
||||
ret = i915_gem_wait_for_idle(dev_priv, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
|
||||
WARN_ON(!list_empty(&vm->active_list));
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
|
||||
if (vma->pin_count == 0)
|
||||
if (!i915_vma_is_pinned(vma))
|
||||
WARN_ON(i915_vma_unbind(vma));
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -26,21 +26,38 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/i915_drm.h>
|
||||
#include "i915_drv.h"
|
||||
#include "i915_trace.h"
|
||||
#include "intel_drv.h"
|
||||
#include <linux/dma_remapping.h>
|
||||
#include <linux/reservation.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#define __EXEC_OBJECT_HAS_PIN (1<<31)
|
||||
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
|
||||
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
|
||||
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/i915_drm.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "i915_gem_dmabuf.h"
|
||||
#include "i915_trace.h"
|
||||
#include "intel_drv.h"
|
||||
#include "intel_frontbuffer.h"
|
||||
|
||||
#define __EXEC_OBJECT_HAS_PIN (1<<31)
|
||||
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
|
||||
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
|
||||
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
|
||||
#define __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
|
||||
|
||||
#define BATCH_OFFSET_BIAS (256*1024)
|
||||
|
||||
struct i915_execbuffer_params {
|
||||
struct drm_device *dev;
|
||||
struct drm_file *file;
|
||||
struct i915_vma *batch;
|
||||
u32 dispatch_flags;
|
||||
u32 args_batch_start_offset;
|
||||
struct intel_engine_cs *engine;
|
||||
struct i915_gem_context *ctx;
|
||||
struct drm_i915_gem_request *request;
|
||||
};
|
||||
|
||||
struct eb_vmas {
|
||||
struct list_head vmas;
|
||||
int and;
|
||||
|
@ -89,6 +106,26 @@ eb_reset(struct eb_vmas *eb)
|
|||
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
|
||||
}
|
||||
|
||||
static struct i915_vma *
|
||||
eb_get_batch(struct eb_vmas *eb)
|
||||
{
|
||||
struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
|
||||
|
||||
/*
|
||||
* SNA is doing fancy tricks with compressing batch buffers, which leads
|
||||
* to negative relocation deltas. Usually that works out ok since the
|
||||
* relocate address is still positive, except when the batch is placed
|
||||
* very low in the GTT. Ensure this doesn't happen.
|
||||
*
|
||||
* Note that actual hangs have only been observed on gen7, but for
|
||||
* paranoia do it everywhere.
|
||||
*/
|
||||
if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
|
||||
vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
|
||||
|
||||
return vma;
|
||||
}
|
||||
|
||||
static int
|
||||
eb_lookup_vmas(struct eb_vmas *eb,
|
||||
struct drm_i915_gem_exec_object2 *exec,
|
||||
|
@ -122,7 +159,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
|
|||
goto err;
|
||||
}
|
||||
|
||||
drm_gem_object_reference(&obj->base);
|
||||
i915_gem_object_get(obj);
|
||||
list_add_tail(&obj->obj_exec_link, &objects);
|
||||
}
|
||||
spin_unlock(&file->table_lock);
|
||||
|
@ -175,7 +212,7 @@ err:
|
|||
struct drm_i915_gem_object,
|
||||
obj_exec_link);
|
||||
list_del_init(&obj->obj_exec_link);
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
}
|
||||
/*
|
||||
* Objects already transfered to the vmas list will be unreferenced by
|
||||
|
@ -219,7 +256,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
|
|||
i915_gem_object_unpin_fence(obj);
|
||||
|
||||
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
|
||||
vma->pin_count--;
|
||||
__i915_vma_unpin(vma);
|
||||
|
||||
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
|
||||
}
|
||||
|
@ -234,7 +271,7 @@ static void eb_destroy(struct eb_vmas *eb)
|
|||
exec_list);
|
||||
list_del_init(&vma->exec_list);
|
||||
i915_gem_execbuffer_unreserve_vma(vma);
|
||||
drm_gem_object_unreference(&vma->obj->base);
|
||||
i915_gem_object_put(vma->obj);
|
||||
}
|
||||
kfree(eb);
|
||||
}
|
||||
|
@ -399,6 +436,20 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool object_is_idle(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
unsigned long active = i915_gem_object_get_active(obj);
|
||||
int idx;
|
||||
|
||||
for_each_active(active, idx) {
|
||||
if (!i915_gem_active_is_idle(&obj->last_read[idx],
|
||||
&obj->base.dev->struct_mutex))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int
|
||||
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
|
||||
struct eb_vmas *eb,
|
||||
|
@ -482,7 +533,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
|
|||
}
|
||||
|
||||
/* We can't wait for rendering with pagefaults disabled */
|
||||
if (obj->active && pagefault_disabled())
|
||||
if (pagefault_disabled() && !object_is_idle(obj))
|
||||
return -EFAULT;
|
||||
|
||||
if (use_cpu_reloc(obj))
|
||||
|
@ -626,12 +677,16 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
|
|||
flags |= PIN_HIGH;
|
||||
}
|
||||
|
||||
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
|
||||
if ((ret == -ENOSPC || ret == -E2BIG) &&
|
||||
ret = i915_vma_pin(vma,
|
||||
entry->pad_to_size,
|
||||
entry->alignment,
|
||||
flags);
|
||||
if ((ret == -ENOSPC || ret == -E2BIG) &&
|
||||
only_mappable_for_reloc(entry->flags))
|
||||
ret = i915_gem_object_pin(obj, vma->vm,
|
||||
entry->alignment,
|
||||
flags & ~PIN_MAPPABLE);
|
||||
ret = i915_vma_pin(vma,
|
||||
entry->pad_to_size,
|
||||
entry->alignment,
|
||||
flags & ~PIN_MAPPABLE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -667,7 +722,7 @@ need_reloc_mappable(struct i915_vma *vma)
|
|||
if (entry->relocation_count == 0)
|
||||
return false;
|
||||
|
||||
if (!vma->is_ggtt)
|
||||
if (!i915_vma_is_ggtt(vma))
|
||||
return false;
|
||||
|
||||
/* See also use_cpu_reloc() */
|
||||
|
@ -686,12 +741,16 @@ eb_vma_misplaced(struct i915_vma *vma)
|
|||
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
|
||||
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
|
||||
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
|
||||
!i915_vma_is_ggtt(vma));
|
||||
|
||||
if (entry->alignment &&
|
||||
vma->node.start & (entry->alignment - 1))
|
||||
return true;
|
||||
|
||||
if (vma->node.size < entry->pad_to_size)
|
||||
return true;
|
||||
|
||||
if (entry->flags & EXEC_OBJECT_PINNED &&
|
||||
vma->node.start != entry->offset)
|
||||
return true;
|
||||
|
@ -725,8 +784,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
|
|||
bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
|
||||
int retry;
|
||||
|
||||
i915_gem_retire_requests_ring(engine);
|
||||
|
||||
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
|
||||
|
||||
INIT_LIST_HEAD(&ordered_vmas);
|
||||
|
@ -746,7 +803,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
|
|||
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
|
||||
need_fence =
|
||||
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
|
||||
obj->tiling_mode != I915_TILING_NONE;
|
||||
i915_gem_object_is_tiled(obj);
|
||||
need_mappable = need_fence || need_reloc_mappable(vma);
|
||||
|
||||
if (entry->flags & EXEC_OBJECT_PINNED)
|
||||
|
@ -843,7 +900,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
|
|||
vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
|
||||
list_del_init(&vma->exec_list);
|
||||
i915_gem_execbuffer_unreserve_vma(vma);
|
||||
drm_gem_object_unreference(&vma->obj->base);
|
||||
i915_gem_object_put(vma->obj);
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
@ -937,11 +994,21 @@ err:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
|
||||
{
|
||||
unsigned int mask;
|
||||
|
||||
mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
|
||||
mask <<= I915_BO_ACTIVE_SHIFT;
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
static int
|
||||
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
||||
struct list_head *vmas)
|
||||
{
|
||||
const unsigned other_rings = ~intel_engine_flag(req->engine);
|
||||
const unsigned int other_rings = eb_other_engines(req);
|
||||
struct i915_vma *vma;
|
||||
uint32_t flush_domains = 0;
|
||||
bool flush_chipset = false;
|
||||
|
@ -950,8 +1017,8 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
|||
list_for_each_entry(vma, vmas, exec_list) {
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
|
||||
if (obj->active & other_rings) {
|
||||
ret = i915_gem_object_sync(obj, req->engine, &req);
|
||||
if (obj->flags & other_rings) {
|
||||
ret = i915_gem_object_sync(obj, req);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -968,10 +1035,8 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
|
|||
if (flush_domains & I915_GEM_DOMAIN_GTT)
|
||||
wmb();
|
||||
|
||||
/* Unconditionally invalidate gpu caches and ensure that we do flush
|
||||
* any residual writes from the previous batch.
|
||||
*/
|
||||
return intel_ring_invalidate_all_caches(req);
|
||||
/* Unconditionally invalidate GPU caches and TLBs. */
|
||||
return req->engine->emit_flush(req, EMIT_INVALIDATE);
|
||||
}
|
||||
|
||||
static bool
|
||||
|
@ -1007,6 +1072,9 @@ validate_exec_list(struct drm_device *dev,
|
|||
unsigned invalid_flags;
|
||||
int i;
|
||||
|
||||
/* INTERNAL flags must not overlap with external ones */
|
||||
BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
|
||||
|
||||
invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
|
||||
if (USES_FULL_PPGTT(dev))
|
||||
invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
|
||||
|
@ -1036,6 +1104,14 @@ validate_exec_list(struct drm_device *dev,
|
|||
if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
|
||||
return -EINVAL;
|
||||
|
||||
/* pad_to_size was once a reserved field, so sanitize it */
|
||||
if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
|
||||
if (offset_in_page(exec[i].pad_to_size))
|
||||
return -EINVAL;
|
||||
} else {
|
||||
exec[i].pad_to_size = 0;
|
||||
}
|
||||
|
||||
/* First check for malicious input causing overflow in
|
||||
* the worst case where we need to allocate the entire
|
||||
* relocation tree as a single array.
|
||||
|
@@ -1086,66 +1162,106 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
	return ctx;
}

void
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	obj->dirty = 1; /* be paranoid */

	/* Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_gem_object_is_active(obj))
		i915_gem_object_get(obj);
	i915_gem_object_set_active(obj, idx);
	i915_gem_active_set(&obj->last_read[idx], req);

	if (flags & EXEC_OBJECT_WRITE) {
		i915_gem_active_set(&obj->last_write, req);

		intel_fb_obj_invalidate(obj, ORIGIN_CS);

		/* update for the implicit flush after a batch */
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		i915_gem_active_set(&obj->last_fence, req);
		if (flags & __EXEC_OBJECT_HAS_FENCE) {
			struct drm_i915_private *dev_priv = req->i915;

			list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
				       &dev_priv->mm.fence_list);
		}
	}

	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);
}

static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv;

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (!resv)
		return;

	/* Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	ww_mutex_lock(&resv->lock, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	ww_mutex_unlock(&resv->lock);
}
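/*
 * Editor's illustrative sketch (invented types, not driver code): the rule
 * eb_export_fence() implements.  A GPU write is installed as the exclusive
 * fence so later readers and writers wait for it; a GPU read only needs a
 * shared fence so that only a later writer has to wait.
 */
struct sketch_resv {
	const void *excl;		/* fence of the last writer, if any */
	const void *shared[8];		/* fences of the current readers */
	unsigned int shared_count;
};

static void sketch_export_fence(struct sketch_resv *resv,
				const void *fence, int is_write)
{
	if (is_write) {
		resv->excl = fence;
		resv->shared_count = 0;	/* the write orders after old readers */
	} else if (resv->shared_count < 8) {
		resv->shared[resv->shared_count++] = fence;
	}
}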
|
||||
|
||||
static void
|
||||
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
|
||||
struct i915_vma *vma;
|
||||
|
||||
list_for_each_entry(vma, vmas, exec_list) {
|
||||
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
u32 old_read = obj->base.read_domains;
|
||||
u32 old_write = obj->base.write_domain;
|
||||
|
||||
obj->dirty = 1; /* be paranoid */
|
||||
obj->base.write_domain = obj->base.pending_write_domain;
|
||||
if (obj->base.write_domain == 0)
|
||||
if (obj->base.write_domain)
|
||||
vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
|
||||
else
|
||||
obj->base.pending_read_domains |= obj->base.read_domains;
|
||||
obj->base.read_domains = obj->base.pending_read_domains;
|
||||
|
||||
i915_vma_move_to_active(vma, req);
|
||||
if (obj->base.write_domain) {
|
||||
i915_gem_request_assign(&obj->last_write_req, req);
|
||||
|
||||
intel_fb_obj_invalidate(obj, ORIGIN_CS);
|
||||
|
||||
/* update for the implicit flush after a batch */
|
||||
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
|
||||
}
|
||||
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
|
||||
i915_gem_request_assign(&obj->last_fenced_req, req);
|
||||
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
|
||||
&dev_priv->mm.fence_list);
|
||||
}
|
||||
}
|
||||
|
||||
i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
|
||||
eb_export_fence(obj, req, vma->exec_entry->flags);
|
||||
trace_i915_gem_object_change_domain(obj, old_read, old_write);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
|
||||
{
|
||||
/* Unconditionally force add_request to emit a full flush. */
|
||||
params->engine->gpu_caches_dirty = true;
|
||||
|
||||
/* Add a breadcrumb for the completion of the batch buffer */
|
||||
__i915_add_request(params->request, params->batch_obj, true);
|
||||
}
|
||||
|
||||
static int
|
||||
i915_reset_gen7_sol_offsets(struct drm_device *dev,
|
||||
struct drm_i915_gem_request *req)
|
||||
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_ring *ring = req->ring;
|
||||
int ret, i;
|
||||
|
||||
if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
|
||||
if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
|
||||
DRM_DEBUG("sol reset is gen7/rcs only\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1155,21 +1271,21 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
|
|||
return ret;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
|
||||
intel_ring_emit(engine, 0);
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
|
||||
intel_ring_emit(ring, 0);
|
||||
}
|
||||
|
||||
intel_ring_advance(engine);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct drm_i915_gem_object*
|
||||
static struct i915_vma*
|
||||
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
|
||||
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
|
||||
struct eb_vmas *eb,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
struct eb_vmas *eb,
|
||||
u32 batch_start_offset,
|
||||
u32 batch_len,
|
||||
bool is_master)
|
||||
|
@ -1181,18 +1297,18 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
|
|||
shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
|
||||
PAGE_ALIGN(batch_len));
|
||||
if (IS_ERR(shadow_batch_obj))
|
||||
return shadow_batch_obj;
|
||||
return ERR_CAST(shadow_batch_obj);
|
||||
|
||||
ret = i915_parse_cmds(engine,
|
||||
batch_obj,
|
||||
shadow_batch_obj,
|
||||
batch_start_offset,
|
||||
batch_len,
|
||||
is_master);
|
||||
ret = intel_engine_cmd_parser(engine,
|
||||
batch_obj,
|
||||
shadow_batch_obj,
|
||||
batch_start_offset,
|
||||
batch_len,
|
||||
is_master);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
|
||||
ret = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
|
@ -1203,29 +1319,25 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
|
|||
vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
|
||||
vma->exec_entry = shadow_exec_entry;
|
||||
vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
|
||||
drm_gem_object_reference(&shadow_batch_obj->base);
|
||||
i915_gem_object_get(shadow_batch_obj);
|
||||
list_add_tail(&vma->exec_list, &eb->vmas);
|
||||
|
||||
shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
|
||||
|
||||
return shadow_batch_obj;
|
||||
return vma;
|
||||
|
||||
err:
|
||||
i915_gem_object_unpin_pages(shadow_batch_obj);
|
||||
if (ret == -EACCES) /* unhandled chained batch */
|
||||
return batch_obj;
|
||||
return NULL;
|
||||
else
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
int
|
||||
i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
||||
struct drm_i915_gem_execbuffer2 *args,
|
||||
struct list_head *vmas)
|
||||
static int
|
||||
execbuf_submit(struct i915_execbuffer_params *params,
|
||||
struct drm_i915_gem_execbuffer2 *args,
|
||||
struct list_head *vmas)
|
||||
{
|
||||
struct drm_device *dev = params->dev;
|
||||
struct intel_engine_cs *engine = params->engine;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_private *dev_priv = params->request->i915;
|
||||
u64 exec_start, exec_len;
|
||||
int instp_mode;
|
||||
u32 instp_mask;
|
||||
|
@ -1239,34 +1351,31 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
|
||||
"%s didn't clear reload\n", engine->name);
|
||||
|
||||
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
|
||||
instp_mask = I915_EXEC_CONSTANTS_MASK;
|
||||
switch (instp_mode) {
|
||||
case I915_EXEC_CONSTANTS_REL_GENERAL:
|
||||
case I915_EXEC_CONSTANTS_ABSOLUTE:
|
||||
case I915_EXEC_CONSTANTS_REL_SURFACE:
|
||||
if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
|
||||
if (instp_mode != 0 && params->engine->id != RCS) {
|
||||
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (instp_mode != dev_priv->relative_constants_mode) {
|
||||
if (INTEL_INFO(dev)->gen < 4) {
|
||||
if (INTEL_INFO(dev_priv)->gen < 4) {
|
||||
DRM_DEBUG("no rel constants on pre-gen4\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen > 5 &&
|
||||
if (INTEL_INFO(dev_priv)->gen > 5 &&
|
||||
instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
|
||||
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* The HW changed the meaning on this bit on gen6 */
|
||||
if (INTEL_INFO(dev)->gen >= 6)
|
||||
if (INTEL_INFO(dev_priv)->gen >= 6)
|
||||
instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
|
||||
}
|
||||
break;
|
||||
|
@ -1275,37 +1384,39 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (engine == &dev_priv->engine[RCS] &&
|
||||
if (params->engine->id == RCS &&
|
||||
instp_mode != dev_priv->relative_constants_mode) {
|
||||
struct intel_ring *ring = params->request->ring;
|
||||
|
||||
ret = intel_ring_begin(params->request, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit_reg(engine, INSTPM);
|
||||
intel_ring_emit(engine, instp_mask << 16 | instp_mode);
|
||||
intel_ring_advance(engine);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit_reg(ring, INSTPM);
|
||||
intel_ring_emit(ring, instp_mask << 16 | instp_mode);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
dev_priv->relative_constants_mode = instp_mode;
|
||||
}
|
||||
|
||||
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
|
||||
ret = i915_reset_gen7_sol_offsets(dev, params->request);
|
||||
ret = i915_reset_gen7_sol_offsets(params->request);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
exec_len = args->batch_len;
|
||||
exec_start = params->batch_obj_vm_offset +
|
||||
exec_start = params->batch->node.start +
|
||||
params->args_batch_start_offset;
|
||||
|
||||
if (exec_len == 0)
|
||||
exec_len = params->batch_obj->base.size;
|
||||
exec_len = params->batch->size;
|
||||
|
||||
ret = engine->dispatch_execbuffer(params->request,
|
||||
exec_start, exec_len,
|
||||
params->dispatch_flags);
|
||||
ret = params->engine->emit_bb_start(params->request,
|
||||
exec_start, exec_len,
|
||||
params->dispatch_flags);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1318,43 +1429,24 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
|||
|
||||
/**
|
||||
* Find one BSD ring to dispatch the corresponding BSD command.
|
||||
* The ring index is returned.
|
||||
* The engine index is returned.
|
||||
*/
|
||||
static unsigned int
|
||||
gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
|
||||
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_file_private *file_priv = file->driver_priv;
|
||||
|
||||
/* Check whether the file_priv has already selected one ring. */
|
||||
if ((int)file_priv->bsd_ring < 0) {
|
||||
if ((int)file_priv->bsd_engine < 0) {
|
||||
/* If not, use the ping-pong mechanism to select one. */
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
|
||||
dev_priv->mm.bsd_ring_dispatch_index ^= 1;
|
||||
file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
|
||||
dev_priv->mm.bsd_engine_dispatch_index ^= 1;
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
|
||||
return file_priv->bsd_ring;
|
||||
}
|
||||
|
||||
static struct drm_i915_gem_object *
|
||||
eb_get_batch(struct eb_vmas *eb)
|
||||
{
|
||||
struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
|
||||
|
||||
/*
|
||||
* SNA is doing fancy tricks with compressing batch buffers, which leads
|
||||
* to negative relocation deltas. Usually that works out ok since the
|
||||
* relocate address is still positive, except when the batch is placed
|
||||
* very low in the GTT. Ensure this doesn't happen.
|
||||
*
|
||||
* Note that actual hangs have only been observed on gen7, but for
|
||||
* paranoia do it everywhere.
|
||||
*/
|
||||
if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
|
||||
vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
|
||||
|
||||
return vma->obj;
|
||||
return file_priv->bsd_engine;
|
||||
}
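/*
 * Editor's illustrative sketch (not part of the patch above): the ping-pong
 * selection used by gen8_dispatch_bsd_engine().  The first execbuf of a file
 * latches whichever BSD engine is next in line, then the index flips so the
 * next client lands on the other engine.  Locking is omitted here.
 */
static unsigned int sketch_pick_bsd_engine(unsigned int *client_engine,
					   unsigned int *dispatch_index)
{
	if ((int)*client_engine < 0) {		/* not chosen yet */
		*client_engine = *dispatch_index;
		*dispatch_index ^= 1;		/* alternate 0, 1, 0, 1, ... */
	}

	return *client_engine;
}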
|
||||
|
||||
#define I915_USER_RINGS (4)
|
||||
|
@ -1367,31 +1459,31 @@ static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
|
|||
[I915_EXEC_VEBOX] = VECS
|
||||
};
|
||||
|
||||
static int
|
||||
eb_select_ring(struct drm_i915_private *dev_priv,
|
||||
struct drm_file *file,
|
||||
struct drm_i915_gem_execbuffer2 *args,
|
||||
struct intel_engine_cs **ring)
|
||||
static struct intel_engine_cs *
|
||||
eb_select_engine(struct drm_i915_private *dev_priv,
|
||||
struct drm_file *file,
|
||||
struct drm_i915_gem_execbuffer2 *args)
|
||||
{
|
||||
unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
if (user_ring_id > I915_USER_RINGS) {
|
||||
DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
|
||||
return -EINVAL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if ((user_ring_id != I915_EXEC_BSD) &&
|
||||
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
|
||||
DRM_DEBUG("execbuf with non bsd ring but with invalid "
|
||||
"bsd dispatch flags: %d\n", (int)(args->flags));
|
||||
return -EINVAL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
|
||||
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
|
||||
|
||||
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
|
||||
bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
|
||||
bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
|
||||
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
|
||||
bsd_idx <= I915_EXEC_BSD_RING2) {
|
||||
bsd_idx >>= I915_EXEC_BSD_SHIFT;
|
||||
|
@ -1399,20 +1491,20 @@ eb_select_ring(struct drm_i915_private *dev_priv,
|
|||
} else {
|
||||
DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
|
||||
bsd_idx);
|
||||
return -EINVAL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
*ring = &dev_priv->engine[_VCS(bsd_idx)];
|
||||
engine = &dev_priv->engine[_VCS(bsd_idx)];
|
||||
} else {
|
||||
*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
|
||||
engine = &dev_priv->engine[user_ring_map[user_ring_id]];
|
||||
}
|
||||
|
||||
if (!intel_engine_initialized(*ring)) {
|
||||
if (!intel_engine_initialized(engine)) {
|
||||
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
|
||||
return -EINVAL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return engine;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -1423,9 +1515,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
struct drm_i915_gem_request *req = NULL;
|
||||
struct eb_vmas *eb;
|
||||
struct drm_i915_gem_object *batch_obj;
|
||||
struct drm_i915_gem_exec_object2 shadow_exec_entry;
|
||||
struct intel_engine_cs *engine;
|
||||
struct i915_gem_context *ctx;
|
||||
|
@ -1454,9 +1544,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
if (args->flags & I915_EXEC_IS_PINNED)
|
||||
dispatch_flags |= I915_DISPATCH_PINNED;
|
||||
|
||||
ret = eb_select_ring(dev_priv, file, args, &engine);
|
||||
if (ret)
|
||||
return ret;
|
||||
engine = eb_select_engine(dev_priv, file, args);
|
||||
if (!engine)
|
||||
return -EINVAL;
|
||||
|
||||
if (args->buffer_count < 1) {
|
||||
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
|
||||
|
@ -1496,7 +1586,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
goto pre_mutex_err;
|
||||
}
|
||||
|
||||
i915_gem_context_reference(ctx);
|
||||
i915_gem_context_get(ctx);
|
||||
|
||||
if (ctx->ppgtt)
|
||||
vm = &ctx->ppgtt->base;
|
||||
|
@ -1507,7 +1597,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
|
||||
eb = eb_create(args);
|
||||
if (eb == NULL) {
|
||||
i915_gem_context_unreference(ctx);
|
||||
i915_gem_context_put(ctx);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
ret = -ENOMEM;
|
||||
goto pre_mutex_err;
|
||||
|
@ -1519,7 +1609,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
goto err;
|
||||
|
||||
/* take note of the batch buffer before we might reorder the lists */
|
||||
batch_obj = eb_get_batch(eb);
|
||||
params->batch = eb_get_batch(eb);
|
||||
|
||||
/* Move the objects en-masse into the GTT, evicting if necessary. */
|
||||
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
|
||||
|
@ -1543,34 +1633,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
}
|
||||
|
||||
/* Set the pending read domains for the batch buffer to COMMAND */
|
||||
if (batch_obj->base.pending_write_domain) {
|
||||
if (params->batch->obj->base.pending_write_domain) {
|
||||
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
params->args_batch_start_offset = args->batch_start_offset;
|
||||
if (i915_needs_cmd_parser(engine) && args->batch_len) {
|
||||
struct drm_i915_gem_object *parsed_batch_obj;
|
||||
if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
|
||||
struct i915_vma *vma;
|
||||
|
||||
parsed_batch_obj = i915_gem_execbuffer_parse(engine,
|
||||
&shadow_exec_entry,
|
||||
eb,
|
||||
batch_obj,
|
||||
args->batch_start_offset,
|
||||
args->batch_len,
|
||||
drm_is_current_master(file));
|
||||
if (IS_ERR(parsed_batch_obj)) {
|
||||
ret = PTR_ERR(parsed_batch_obj);
|
||||
vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
|
||||
params->batch->obj,
|
||||
eb,
|
||||
args->batch_start_offset,
|
||||
args->batch_len,
|
||||
drm_is_current_master(file));
|
||||
if (IS_ERR(vma)) {
|
||||
ret = PTR_ERR(vma);
|
||||
goto err;
|
||||
}
|
||||
|
||||
/*
|
||||
* parsed_batch_obj == batch_obj means batch not fully parsed:
|
||||
* Accept, but don't promote to secure.
|
||||
*/
|
||||
|
||||
if (parsed_batch_obj != batch_obj) {
|
||||
if (vma) {
|
||||
/*
|
||||
* Batch parsed and accepted:
|
||||
*
|
||||
|
@ -1582,16 +1666,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
*/
|
||||
dispatch_flags |= I915_DISPATCH_SECURE;
|
||||
params->args_batch_start_offset = 0;
|
||||
batch_obj = parsed_batch_obj;
|
||||
params->batch = vma;
|
||||
}
|
||||
}
|
||||
|
||||
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
|
||||
params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
|
||||
|
||||
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
|
||||
* batch" bit. Hence we need to pin secure batches into the global gtt.
|
||||
* hsw should have this fixed, but bdw mucks it up again. */
|
||||
if (dispatch_flags & I915_DISPATCH_SECURE) {
|
||||
struct drm_i915_gem_object *obj = params->batch->obj;
|
||||
|
||||
/*
|
||||
* So on first glance it looks freaky that we pin the batch here
|
||||
* outside of the reservation loop. But:
|
||||
|
@ -1602,22 +1688,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
* fitting due to fragmentation.
|
||||
* So this is actually safe.
|
||||
*/
|
||||
ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
|
||||
ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
|
||||
} else
|
||||
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
|
||||
params->batch = i915_gem_obj_to_ggtt(obj);
|
||||
}
|
||||
|
||||
/* Allocate a request for this batch buffer nice and early. */
|
||||
req = i915_gem_request_alloc(engine, ctx);
|
||||
if (IS_ERR(req)) {
|
||||
ret = PTR_ERR(req);
|
||||
params->request = i915_gem_request_alloc(engine, ctx);
|
||||
if (IS_ERR(params->request)) {
|
||||
ret = PTR_ERR(params->request);
|
||||
goto err_batch_unpin;
|
||||
}
|
||||
|
||||
ret = i915_gem_request_add_to_client(req, file);
|
||||
ret = i915_gem_request_add_to_client(params->request, file);
|
||||
if (ret)
|
||||
goto err_request;
|
||||
|
||||
|
@ -1631,13 +1716,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
|
|||
params->file = file;
|
||||
params->engine = engine;
|
||||
params->dispatch_flags = dispatch_flags;
|
||||
params->batch_obj = batch_obj;
|
||||
params->ctx = ctx;
|
||||
params->request = req;
|
||||
|
||||
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
|
||||
ret = execbuf_submit(params, args, &eb->vmas);
|
||||
err_request:
|
||||
i915_gem_execbuffer_retire_commands(params);
|
||||
__i915_add_request(params->request, params->batch->obj, ret == 0);
|
||||
|
||||
err_batch_unpin:
|
||||
/*
|
||||
|
@ -1647,11 +1730,10 @@ err_batch_unpin:
|
|||
* active.
|
||||
*/
|
||||
if (dispatch_flags & I915_DISPATCH_SECURE)
|
||||
i915_gem_object_ggtt_unpin(batch_obj);
|
||||
|
||||
i915_vma_unpin(params->batch);
|
||||
err:
|
||||
/* the request owns the ref now */
|
||||
i915_gem_context_unreference(ctx);
|
||||
i915_gem_context_put(ctx);
|
||||
eb_destroy(eb);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
|
|
@ -86,20 +86,22 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
|
|||
|
||||
if (obj) {
|
||||
u32 size = i915_gem_obj_ggtt_size(obj);
|
||||
unsigned int tiling = i915_gem_object_get_tiling(obj);
|
||||
unsigned int stride = i915_gem_object_get_stride(obj);
|
||||
uint64_t val;
|
||||
|
||||
/* Adjust fence size to match tiled area */
|
||||
if (obj->tiling_mode != I915_TILING_NONE) {
|
||||
uint32_t row_size = obj->stride *
|
||||
(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
|
||||
if (tiling != I915_TILING_NONE) {
|
||||
uint32_t row_size = stride *
|
||||
(tiling == I915_TILING_Y ? 32 : 8);
|
||||
size = (size / row_size) * row_size;
|
||||
}
|
||||
|
||||
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
|
||||
0xfffff000) << 32;
|
||||
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
|
||||
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
|
||||
if (obj->tiling_mode == I915_TILING_Y)
|
||||
val |= (uint64_t)((stride / 128) - 1) << fence_pitch_shift;
|
||||
if (tiling == I915_TILING_Y)
|
||||
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
|
||||
val |= I965_FENCE_REG_VALID;
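/*
 * Editor's illustrative sketch (not part of the patch above): the arithmetic
 * used just above to make the fence cover whole tile rows.  On these parts a
 * Y-tile row spans 32 scanlines and an X-tile row spans 8, so the fenced size
 * is rounded down to a multiple of one row.
 */
static unsigned int sketch_fence_size(unsigned int size, unsigned int stride,
				      int is_y_tiled)
{
	unsigned int row_size = stride * (is_y_tiled ? 32 : 8);

	return (size / row_size) * row_size;
}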
|
||||
|
||||
|
@ -122,6 +124,8 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
|
|||
|
||||
if (obj) {
|
||||
u32 size = i915_gem_obj_ggtt_size(obj);
|
||||
unsigned int tiling = i915_gem_object_get_tiling(obj);
|
||||
unsigned int stride = i915_gem_object_get_stride(obj);
|
||||
int pitch_val;
|
||||
int tile_width;
|
||||
|
||||
|
@ -131,17 +135,17 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
|
|||
"object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
|
||||
i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
|
||||
|
||||
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
|
||||
if (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
|
||||
tile_width = 128;
|
||||
else
|
||||
tile_width = 512;
|
||||
|
||||
/* Note: pitch better be a power of two tile widths */
|
||||
pitch_val = obj->stride / tile_width;
|
||||
pitch_val = stride / tile_width;
|
||||
pitch_val = ffs(pitch_val) - 1;
|
||||
|
||||
val = i915_gem_obj_ggtt_offset(obj);
|
||||
if (obj->tiling_mode == I915_TILING_Y)
|
||||
if (tiling == I915_TILING_Y)
|
||||
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
|
||||
val |= I915_FENCE_SIZE_BITS(size);
|
||||
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
|
||||
|
@ -161,6 +165,8 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
|
|||
|
||||
if (obj) {
|
||||
u32 size = i915_gem_obj_ggtt_size(obj);
|
||||
unsigned int tiling = i915_gem_object_get_tiling(obj);
|
||||
unsigned int stride = i915_gem_object_get_stride(obj);
|
||||
uint32_t pitch_val;
|
||||
|
||||
WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
|
||||
|
@ -169,11 +175,11 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
|
|||
"object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
|
||||
i915_gem_obj_ggtt_offset(obj), size);
|
||||
|
||||
pitch_val = obj->stride / 128;
|
||||
pitch_val = stride / 128;
|
||||
pitch_val = ffs(pitch_val) - 1;
|
||||
|
||||
val = i915_gem_obj_ggtt_offset(obj);
|
||||
if (obj->tiling_mode == I915_TILING_Y)
|
||||
if (tiling == I915_TILING_Y)
|
||||
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
|
||||
val |= I830_FENCE_SIZE_BITS(size);
|
||||
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
|
||||
|
@ -201,9 +207,12 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
|
|||
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
|
||||
mb();
|
||||
|
||||
WARN(obj && (!obj->stride || !obj->tiling_mode),
|
||||
WARN(obj &&
|
||||
(!i915_gem_object_get_stride(obj) ||
|
||||
!i915_gem_object_get_tiling(obj)),
|
||||
"bogus fence setup with stride: 0x%x, tiling mode: %i\n",
|
||||
obj->stride, obj->tiling_mode);
|
||||
i915_gem_object_get_stride(obj),
|
||||
i915_gem_object_get_tiling(obj));
|
||||
|
||||
if (IS_GEN2(dev))
|
||||
i830_write_fence_reg(dev, reg, obj);
|
||||
|
@ -248,7 +257,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
|
|||
|
||||
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
if (obj->tiling_mode)
|
||||
if (i915_gem_object_is_tiled(obj))
|
||||
i915_gem_release_mmap(obj);
|
||||
|
||||
/* As we do not have an associated fence register, we will force
|
||||
|
@ -261,15 +270,8 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
|
|||
static int
|
||||
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
if (obj->last_fenced_req) {
|
||||
int ret = i915_wait_request(obj->last_fenced_req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_request_assign(&obj->last_fenced_req, NULL);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return i915_gem_active_retire(&obj->last_fence,
|
||||
&obj->base.dev->struct_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -368,7 +370,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
|
|||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
bool enable = obj->tiling_mode != I915_TILING_NONE;
|
||||
bool enable = i915_gem_object_is_tiled(obj);
|
||||
struct drm_i915_fence_reg *reg;
|
||||
int ret;
|
||||
|
||||
|
@ -438,7 +440,7 @@ i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
|
|||
|
||||
WARN_ON(!ggtt_vma ||
|
||||
dev_priv->fence_regs[obj->fence_reg].pin_count >
|
||||
ggtt_vma->pin_count);
|
||||
i915_vma_pin_count(ggtt_vma));
|
||||
dev_priv->fence_regs[obj->fence_reg].pin_count++;
|
||||
return true;
|
||||
} else
|
||||
|
@ -484,7 +486,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
|
|||
*/
|
||||
if (reg->obj) {
|
||||
i915_gem_object_update_fence(reg->obj, reg,
|
||||
reg->obj->tiling_mode);
|
||||
i915_gem_object_get_tiling(reg->obj));
|
||||
} else {
|
||||
i915_gem_write_fence(dev, i, NULL);
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -36,6 +36,8 @@
|
|||
|
||||
#include <linux/io-mapping.h>
|
||||
|
||||
#include "i915_gem_request.h"
|
||||
|
||||
struct drm_i915_file_private;
|
||||
|
||||
typedef uint32_t gen6_pte_t;
|
||||
|
@ -178,12 +180,32 @@ struct i915_vma {
|
|||
struct drm_i915_gem_object *obj;
|
||||
struct i915_address_space *vm;
|
||||
void __iomem *iomap;
|
||||
u64 size;
|
||||
|
||||
unsigned int flags;
|
||||
/**
|
||||
* How many users have pinned this object in GTT space. The following
|
||||
* users can each hold at most one reference: pwrite/pread, execbuffer
|
||||
* (objects are not allowed multiple times for the same batchbuffer),
|
||||
* and the framebuffer code. When switching/pageflipping, the
|
||||
* framebuffer code has at most two buffers pinned per crtc.
|
||||
*
|
||||
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
|
||||
* bits with absolutely no headroom. So use 4 bits.
|
||||
*/
|
||||
#define I915_VMA_PIN_MASK 0xf
|
||||
#define I915_VMA_PIN_OVERFLOW BIT(5)
|
||||
|
||||
/** Flags and address space this VMA is bound to */
|
||||
#define GLOBAL_BIND (1<<0)
|
||||
#define LOCAL_BIND (1<<1)
|
||||
unsigned int bound : 4;
|
||||
bool is_ggtt : 1;
|
||||
#define I915_VMA_GLOBAL_BIND BIT(6)
|
||||
#define I915_VMA_LOCAL_BIND BIT(7)
|
||||
#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
|
||||
|
||||
#define I915_VMA_GGTT BIT(8)
|
||||
#define I915_VMA_CLOSED BIT(9)
|
||||
|
||||
unsigned int active;
|
||||
struct i915_gem_active last_read[I915_NUM_ENGINES];
|
||||
|
||||
/**
|
||||
* Support different GGTT views into the same object.
|
||||
|
@ -208,20 +230,46 @@ struct i915_vma {
|
|||
struct hlist_node exec_node;
|
||||
unsigned long exec_handle;
|
||||
struct drm_i915_gem_exec_object2 *exec_entry;
|
||||
|
||||
/**
|
||||
* How many users have pinned this object in GTT space. The following
|
||||
* users can each hold at most one reference: pwrite/pread, execbuffer
|
||||
* (objects are not allowed multiple times for the same batchbuffer),
|
||||
* and the framebuffer code. When switching/pageflipping, the
|
||||
* framebuffer code has at most two buffers pinned per crtc.
|
||||
*
|
||||
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
|
||||
* bits with absolutely no headroom. So use 4 bits. */
|
||||
unsigned int pin_count:4;
|
||||
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
|
||||
};
|
||||
|
||||
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
|
||||
{
|
||||
return vma->flags & I915_VMA_GGTT;
|
||||
}
|
||||
|
||||
static inline bool i915_vma_is_closed(const struct i915_vma *vma)
|
||||
{
|
||||
return vma->flags & I915_VMA_CLOSED;
|
||||
}
|
||||
|
||||
static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
|
||||
{
|
||||
return vma->active;
|
||||
}
|
||||
|
||||
static inline bool i915_vma_is_active(const struct i915_vma *vma)
|
||||
{
|
||||
return i915_vma_get_active(vma);
|
||||
}
|
||||
|
||||
static inline void i915_vma_set_active(struct i915_vma *vma,
|
||||
unsigned int engine)
|
||||
{
|
||||
vma->active |= BIT(engine);
|
||||
}
|
||||
|
||||
static inline void i915_vma_clear_active(struct i915_vma *vma,
|
||||
unsigned int engine)
|
||||
{
|
||||
vma->active &= ~BIT(engine);
|
||||
}
|
||||
|
||||
static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
|
||||
unsigned int engine)
|
||||
{
|
||||
return vma->active & BIT(engine);
|
||||
}
|
||||
|
||||
struct i915_page_dma {
|
||||
struct page *page;
|
||||
union {
|
||||
|
@ -272,11 +320,20 @@ struct i915_pml4 {
|
|||
struct i915_address_space {
|
||||
struct drm_mm mm;
|
||||
struct drm_device *dev;
|
||||
/* Every address space belongs to a struct file - except for the global
|
||||
* GTT that is owned by the driver (and so @file is set to NULL). In
|
||||
* principle, no information should leak from one context to another
|
||||
* (or between files/processes etc) unless explicitly shared by the
|
||||
* owner. Tracking the owner is important in order to free up per-file
|
||||
* objects along with the file, to aide resource tracking, and to
|
||||
* assign blame.
|
||||
*/
|
||||
struct drm_i915_file_private *file;
|
||||
struct list_head global_link;
|
||||
u64 start; /* Start offset always 0 for dri2 */
|
||||
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
|
||||
|
||||
bool is_ggtt;
|
||||
bool closed;
|
||||
|
||||
struct i915_page_scratch *scratch_page;
|
||||
struct i915_page_table *scratch_pt;
|
||||
|
@ -306,6 +363,13 @@ struct i915_address_space {
|
|||
*/
|
||||
struct list_head inactive_list;
|
||||
|
||||
/**
|
||||
* List of vma that have been unbound.
|
||||
*
|
||||
* A reference is not held on the buffer while on this list.
|
||||
*/
|
||||
struct list_head unbound_list;
|
||||
|
||||
/* FIXME: Need a more generic return type */
|
||||
gen6_pte_t (*pte_encode)(dma_addr_t addr,
|
||||
enum i915_cache_level level,
|
||||
|
@ -338,7 +402,7 @@ struct i915_address_space {
|
|||
u32 flags);
|
||||
};
|
||||
|
||||
#define i915_is_ggtt(V) ((V)->is_ggtt)
|
||||
#define i915_is_ggtt(V) (!(V)->file)
|
||||
|
||||
/* The Graphics Translation Table is the way in which GEN hardware translates a
|
||||
* Graphics Virtual Address into a Physical Address. In addition to the normal
|
||||
|
@ -354,7 +418,6 @@ struct i915_ggtt {
|
|||
size_t stolen_usable_size; /* Total size minus BIOS reserved */
|
||||
size_t stolen_reserved_base;
|
||||
size_t stolen_reserved_size;
|
||||
size_t size; /* Total size of Global GTT */
|
||||
u64 mappable_end; /* End offset that we can CPU map */
|
||||
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
|
||||
phys_addr_t mappable_base; /* PA of our GMADR */
|
||||
|
@ -365,8 +428,6 @@ struct i915_ggtt {
|
|||
bool do_idle_maps;
|
||||
|
||||
int mtrr;
|
||||
|
||||
int (*probe)(struct i915_ggtt *ggtt);
|
||||
};
|
||||
|
||||
struct i915_hw_ppgtt {
|
||||
|
@ -380,8 +441,6 @@ struct i915_hw_ppgtt {
|
|||
struct i915_page_directory pd; /* GEN6-7 */
|
||||
};
|
||||
|
||||
struct drm_i915_file_private *file_priv;
|
||||
|
||||
gen6_pte_t __iomem *pd_addr;
|
||||
|
||||
int (*enable)(struct i915_hw_ppgtt *ppgtt);
|
||||
|
@ -521,14 +580,15 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
|
|||
px_dma(ppgtt->base.scratch_pd);
|
||||
}
|
||||
|
||||
int i915_ggtt_init_hw(struct drm_device *dev);
|
||||
int i915_ggtt_enable_hw(struct drm_device *dev);
|
||||
void i915_gem_init_ggtt(struct drm_device *dev);
|
||||
void i915_ggtt_cleanup_hw(struct drm_device *dev);
|
||||
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
|
||||
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
|
||||
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
|
||||
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
|
||||
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
|
||||
|
||||
int i915_ppgtt_init_hw(struct drm_device *dev);
|
||||
void i915_ppgtt_release(struct kref *kref);
|
||||
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
|
||||
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
|
||||
struct drm_i915_file_private *fpriv);
|
||||
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
|
||||
{
|
||||
|
@ -562,9 +622,66 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
|
|||
return true;
|
||||
}
|
||||
|
||||
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		    const struct i915_ggtt_view *view);
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK		BIT(0)
#define PIN_MAPPABLE		BIT(1)
#define PIN_ZONE_4G		BIT(2)

#define PIN_MBZ			BIT(5) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL		BIT(6) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER		BIT(7) /* I915_VMA_LOCAL_BIND */
#define PIN_UPDATE		BIT(8)

#define PIN_HIGH		BIT(9)
#define PIN_OFFSET_BIAS		BIT(10)
#define PIN_OFFSET_FIXED	BIT(11)
#define PIN_OFFSET_MASK		(~4095)

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags);
static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	/* Pin early to prevent the shrinker/eviction logic from destroying
	 * our vma as we insert and bind.
	 */
	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
		return 0;

	return __i915_vma_do_pin(vma, size, alignment, flags);
}
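/*
 * Editor's illustrative sketch (not part of the patch above): why the single
 * "((++flags ^ wanted) & BIND_MASK) == 0" test above is a complete fast path.
 * The low bits of vma->flags count pins, so the pre-increment takes the
 * reference; the XOR is zero only if every requested bind bit is already set
 * and the counter has not carried into the overflow sentinel.  The bit values
 * below simply mirror the defines above.
 */
#define SKETCH_PIN_OVERFLOW	(1u << 5)
#define SKETCH_GLOBAL_BIND	(1u << 6)
#define SKETCH_LOCAL_BIND	(1u << 7)
#define SKETCH_BIND_MASK \
	(SKETCH_GLOBAL_BIND | SKETCH_LOCAL_BIND | SKETCH_PIN_OVERFLOW)

static int sketch_fast_pin(unsigned int *flags, unsigned int wanted_binds)
{
	if (((++*flags ^ wanted_binds) & SKETCH_BIND_MASK) == 0)
		return 1;	/* already bound as requested: done */

	return 0;		/* take the slow path: bind and/or unwind */
}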
|
||||
|
||||
static inline int i915_vma_pin_count(const struct i915_vma *vma)
|
||||
{
|
||||
return vma->flags & I915_VMA_PIN_MASK;
|
||||
}
|
||||
|
||||
static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
|
||||
{
|
||||
return i915_vma_pin_count(vma);
|
||||
}
|
||||
|
||||
static inline void __i915_vma_pin(struct i915_vma *vma)
|
||||
{
|
||||
vma->flags++;
|
||||
GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
|
||||
}
|
||||
|
||||
static inline void __i915_vma_unpin(struct i915_vma *vma)
|
||||
{
|
||||
GEM_BUG_ON(!i915_vma_is_pinned(vma));
|
||||
vma->flags--;
|
||||
}
|
||||
|
||||
static inline void i915_vma_unpin(struct i915_vma *vma)
|
||||
{
|
||||
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
|
||||
__i915_vma_unpin(vma);
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
|
||||
|
@ -580,6 +697,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
|
|||
* Returns a valid iomapped pointer or ERR_PTR.
|
||||
*/
|
||||
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
|
||||
#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
|
||||
|
||||
/**
|
||||
* i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
|
||||
|
@ -593,9 +711,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
|
|||
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
|
||||
{
|
||||
lockdep_assert_held(&vma->vm->dev->struct_mutex);
|
||||
GEM_BUG_ON(vma->pin_count == 0);
|
||||
GEM_BUG_ON(vma->iomap == NULL);
|
||||
vma->pin_count--;
|
||||
i915_vma_unpin(vma);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -28,10 +28,18 @@
|
|||
#include "i915_drv.h"
|
||||
#include "intel_renderstate.h"
|
||||
|
||||
struct render_state {
|
||||
const struct intel_renderstate_rodata *rodata;
|
||||
struct drm_i915_gem_object *obj;
|
||||
u64 ggtt_offset;
|
||||
u32 aux_batch_size;
|
||||
u32 aux_batch_offset;
|
||||
};
|
||||
|
||||
static const struct intel_renderstate_rodata *
|
||||
render_state_get_rodata(const int gen)
|
||||
render_state_get_rodata(const struct drm_i915_gem_request *req)
|
||||
{
|
||||
switch (gen) {
|
||||
switch (INTEL_GEN(req->i915)) {
|
||||
case 6:
|
||||
return &gen6_null_state;
|
||||
case 7:
|
||||
|
@ -45,35 +53,6 @@ render_state_get_rodata(const int gen)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static int render_state_init(struct render_state *so,
|
||||
struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int ret;
|
||||
|
||||
so->gen = INTEL_GEN(dev_priv);
|
||||
so->rodata = render_state_get_rodata(so->gen);
|
||||
if (so->rodata == NULL)
|
||||
return 0;
|
||||
|
||||
if (so->rodata->batch_items * 4 > 4096)
|
||||
return -EINVAL;
|
||||
|
||||
so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
|
||||
if (IS_ERR(so->obj))
|
||||
return PTR_ERR(so->obj);
|
||||
|
||||
ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
|
||||
if (ret)
|
||||
goto free_gem;
|
||||
|
||||
so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
|
||||
return 0;
|
||||
|
||||
free_gem:
|
||||
drm_gem_object_unreference(&so->obj->base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Macro to add commands to auxiliary batch.
|
||||
* This macro only checks for page overflow before inserting the commands,
|
||||
|
@ -96,6 +75,7 @@ static int render_state_setup(struct render_state *so)
|
|||
{
|
||||
struct drm_device *dev = so->obj->base.dev;
|
||||
const struct intel_renderstate_rodata *rodata = so->rodata;
|
||||
const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
|
||||
unsigned int i = 0, reloc_index = 0;
|
||||
struct page *page;
|
||||
u32 *d;
|
||||
|
@ -114,7 +94,7 @@ static int render_state_setup(struct render_state *so)
|
|||
if (i * 4 == rodata->reloc[reloc_index]) {
|
||||
u64 r = s + so->ggtt_offset;
|
||||
s = lower_32_bits(r);
|
||||
if (so->gen >= 8) {
|
||||
if (has_64bit_reloc) {
|
||||
if (i + 1 >= rodata->batch_items ||
|
||||
rodata->batch[i + 1] != 0) {
|
||||
ret = -EINVAL;
|
||||
|
@ -192,67 +172,55 @@ err_out:
|
|||
|
||||
#undef OUT_BATCH
|
||||
|
||||
void i915_gem_render_state_fini(struct render_state *so)
|
||||
{
|
||||
i915_gem_object_ggtt_unpin(so->obj);
|
||||
drm_gem_object_unreference(&so->obj->base);
|
||||
}
|
||||
|
||||
int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
|
||||
struct render_state *so)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(engine->id != RCS))
|
||||
return -ENOENT;
|
||||
|
||||
ret = render_state_init(so, engine->i915);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (so->rodata == NULL)
|
||||
return 0;
|
||||
|
||||
ret = render_state_setup(so);
|
||||
if (ret) {
|
||||
i915_gem_render_state_fini(so);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int i915_gem_render_state_init(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct render_state so;
|
||||
int ret;
|
||||
|
||||
ret = i915_gem_render_state_prepare(req->engine, &so);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (WARN_ON(req->engine->id != RCS))
|
||||
return -ENOENT;
|
||||
|
||||
if (so.rodata == NULL)
|
||||
so.rodata = render_state_get_rodata(req);
|
||||
if (!so.rodata)
|
||||
return 0;
|
||||
|
||||
ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
|
||||
so.rodata->batch_items * 4,
|
||||
I915_DISPATCH_SECURE);
|
||||
if (so.rodata->batch_items * 4 > 4096)
|
||||
return -EINVAL;
|
||||
|
||||
so.obj = i915_gem_object_create(&req->i915->drm, 4096);
|
||||
if (IS_ERR(so.obj))
|
||||
return PTR_ERR(so.obj);
|
||||
|
||||
ret = i915_gem_object_ggtt_pin(so.obj, NULL, 0, 0, 0);
|
||||
if (ret)
|
||||
goto out;
|
||||
goto err_obj;
|
||||
|
||||
so.ggtt_offset = i915_gem_obj_ggtt_offset(so.obj);
|
||||
|
||||
ret = render_state_setup(&so);
|
||||
if (ret)
|
||||
goto err_unpin;
|
||||
|
||||
ret = req->engine->emit_bb_start(req, so.ggtt_offset,
|
||||
so.rodata->batch_items * 4,
|
||||
I915_DISPATCH_SECURE);
|
||||
if (ret)
|
||||
goto err_unpin;
|
||||
|
||||
if (so.aux_batch_size > 8) {
|
||||
ret = req->engine->dispatch_execbuffer(req,
|
||||
(so.ggtt_offset +
|
||||
so.aux_batch_offset),
|
||||
so.aux_batch_size,
|
||||
I915_DISPATCH_SECURE);
|
||||
ret = req->engine->emit_bb_start(req,
|
||||
(so.ggtt_offset +
|
||||
so.aux_batch_offset),
|
||||
so.aux_batch_size,
|
||||
I915_DISPATCH_SECURE);
|
||||
if (ret)
|
||||
goto out;
|
||||
goto err_unpin;
|
||||
}
|
||||
|
||||
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
|
||||
|
||||
out:
|
||||
i915_gem_render_state_fini(&so);
|
||||
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req, 0);
|
||||
err_unpin:
|
||||
i915_gem_object_ggtt_unpin(so.obj);
|
||||
err_obj:
|
||||
i915_gem_object_put(so.obj);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -26,24 +26,6 @@
|
|||
|
||||
#include <linux/types.h>
|
||||
|
||||
struct intel_renderstate_rodata {
|
||||
const u32 *reloc;
|
||||
const u32 *batch;
|
||||
const u32 batch_items;
|
||||
};
|
||||
|
||||
struct render_state {
|
||||
const struct intel_renderstate_rodata *rodata;
|
||||
struct drm_i915_gem_object *obj;
|
||||
u64 ggtt_offset;
|
||||
int gen;
|
||||
u32 aux_batch_size;
|
||||
u32 aux_batch_offset;
|
||||
};
|
||||
|
||||
int i915_gem_render_state_init(struct drm_i915_gem_request *req);
|
||||
void i915_gem_render_state_fini(struct render_state *so);
|
||||
int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
|
||||
struct render_state *so);
|
||||
|
||||
#endif /* _I915_GEM_RENDER_STATE_H_ */
|
||||
|
|
767 drivers/gpu/drm/i915/i915_gem_request.c Normal file
|
@ -0,0 +1,767 @@
|
|||
/*
|
||||
* Copyright © 2008-2015 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/prefetch.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
static const char *i915_fence_get_driver_name(struct fence *fence)
|
||||
{
|
||||
return "i915";
|
||||
}
|
||||
|
||||
static const char *i915_fence_get_timeline_name(struct fence *fence)
|
||||
{
|
||||
/* Timelines are bound by eviction to a VM. However, since
|
||||
* we only have a global seqno at the moment, we only have
|
||||
* a single timeline. Note that each timeline will have
|
||||
* multiple execution contexts (fence contexts) as we allow
|
||||
* engines within a single timeline to execute in parallel.
|
||||
*/
|
||||
return "global";
|
||||
}
|
||||
|
||||
static bool i915_fence_signaled(struct fence *fence)
|
||||
{
|
||||
return i915_gem_request_completed(to_request(fence));
|
||||
}
|
||||
|
||||
static bool i915_fence_enable_signaling(struct fence *fence)
|
||||
{
|
||||
if (i915_fence_signaled(fence))
|
||||
return false;
|
||||
|
||||
intel_engine_enable_signaling(to_request(fence));
|
||||
return true;
|
||||
}
|
||||
|
||||
static signed long i915_fence_wait(struct fence *fence,
				   bool interruptible,
				   signed long timeout_jiffies)
{
	s64 timeout_ns, *timeout;
	int ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
		timeout_ns = jiffies_to_nsecs(timeout_jiffies);
		timeout = &timeout_ns;
	} else {
		timeout = NULL;
	}

	ret = i915_wait_request(to_request(fence),
				interruptible, timeout,
				NO_WAITBOOST);
	if (ret == -ETIME)
		return 0;

	if (ret < 0)
		return ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
		timeout_jiffies = nsecs_to_jiffies(timeout_ns);

	return timeout_jiffies;
}
|
||||
|
||||
static void i915_fence_value_str(struct fence *fence, char *str, int size)
|
||||
{
|
||||
snprintf(str, size, "%u", fence->seqno);
|
||||
}
|
||||
|
||||
static void i915_fence_timeline_value_str(struct fence *fence, char *str,
|
||||
int size)
|
||||
{
|
||||
snprintf(str, size, "%u",
|
||||
intel_engine_get_seqno(to_request(fence)->engine));
|
||||
}
|
||||
|
||||
static void i915_fence_release(struct fence *fence)
|
||||
{
|
||||
struct drm_i915_gem_request *req = to_request(fence);
|
||||
|
||||
kmem_cache_free(req->i915->requests, req);
|
||||
}
|
||||
|
||||
const struct fence_ops i915_fence_ops = {
|
||||
.get_driver_name = i915_fence_get_driver_name,
|
||||
.get_timeline_name = i915_fence_get_timeline_name,
|
||||
.enable_signaling = i915_fence_enable_signaling,
|
||||
.signaled = i915_fence_signaled,
|
||||
.wait = i915_fence_wait,
|
||||
.release = i915_fence_release,
|
||||
.fence_value_str = i915_fence_value_str,
|
||||
.timeline_value_str = i915_fence_timeline_value_str,
|
||||
};
|
||||
|
||||
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_private *dev_private;
|
||||
struct drm_i915_file_private *file_priv;
|
||||
|
||||
WARN_ON(!req || !file || req->file_priv);
|
||||
|
||||
if (!req || !file)
|
||||
return -EINVAL;
|
||||
|
||||
if (req->file_priv)
|
||||
return -EINVAL;
|
||||
|
||||
dev_private = req->i915;
|
||||
file_priv = file->driver_priv;
|
||||
|
||||
spin_lock(&file_priv->mm.lock);
|
||||
req->file_priv = file_priv;
|
||||
list_add_tail(&req->client_list, &file_priv->mm.request_list);
|
||||
spin_unlock(&file_priv->mm.lock);
|
||||
|
||||
req->pid = get_pid(task_pid(current));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
|
||||
{
|
||||
struct drm_i915_file_private *file_priv = request->file_priv;
|
||||
|
||||
if (!file_priv)
|
||||
return;
|
||||
|
||||
spin_lock(&file_priv->mm.lock);
|
||||
list_del(&request->client_list);
|
||||
request->file_priv = NULL;
|
||||
spin_unlock(&file_priv->mm.lock);
|
||||
|
||||
put_pid(request->pid);
|
||||
request->pid = NULL;
|
||||
}
|
||||
|
||||
void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct i915_gem_active *active, *next;

	trace_i915_gem_request_retire(request);
	list_del(&request->link);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_put(request->ctx);
	i915_gem_request_put(request);
}
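/*
 * Editor's illustrative sketch (not part of the patch above): the
 * decouple-before-callback pattern used in the retirement loop.  The
 * ->retire() callback may free or reuse the node, so the node is unlinked
 * and its request pointer cleared before the callback runs.
 */
struct sketch_active {
	struct sketch_active *next;
	void (*retire)(struct sketch_active *node);
};

static void sketch_retire_all(struct sketch_active *head)
{
	struct sketch_active *node = head;

	while (node) {
		struct sketch_active *next = node->next;

		node->next = NULL;	/* decouple first ... */
		node->retire(node);	/* ... then let the callback run */
		node = next;
	}
}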
|
||||
|
||||
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_gem_request *tmp;
|
||||
|
||||
lockdep_assert_held(&req->i915->drm.struct_mutex);
|
||||
GEM_BUG_ON(list_empty(&req->link));
|
||||
|
||||
do {
|
||||
tmp = list_first_entry(&engine->request_list,
|
||||
typeof(*tmp), link);
|
||||
|
||||
i915_gem_request_retire(tmp);
|
||||
} while (tmp != req);
|
||||
}
|
||||
|
||||
static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
|
||||
{
|
||||
if (__i915_terminally_wedged(reset_counter))
|
||||
return -EIO;
|
||||
|
||||
if (__i915_reset_in_progress(reset_counter)) {
|
||||
/* Non-interruptible callers can't handle -EAGAIN, hence return
|
||||
* -EIO unconditionally for these.
|
||||
*/
|
||||
if (!interruptible)
|
||||
return -EIO;
|
||||
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct intel_engine_cs *engine;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	for_each_engine(engine, dev_priv) {
		ret = intel_engine_idle(engine, true);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev_priv);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
		while (intel_kick_waiters(dev_priv) ||
		       intel_kick_signalers(dev_priv))
			yield();
	}

	/* Finally reset hw state */
	for_each_engine(engine, dev_priv)
		intel_engine_init_seqno(engine, seqno);

	return 0;
}
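/*
 * Editor's illustrative sketch (not part of the patch above): the wrap-safe
 * "has seqno a passed seqno b" comparison that i915_seqno_passed() performs,
 * which is what decides above whether the new seqno jumps backwards.
 */
static int sketch_seqno_passed(unsigned int a, unsigned int b)
{
	/* signed difference handles wrap-around of the 32-bit seqno space */
	return (int)(a - b) >= 0;
}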
|
||||
|
||||
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
if (seqno == 0)
|
||||
return -EINVAL;
|
||||
|
||||
/* HWS page needs to be set less than what we
|
||||
* will inject to ring
|
||||
*/
|
||||
ret = i915_gem_init_seqno(dev_priv, seqno - 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dev_priv->next_seqno = seqno;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
|
||||
{
|
||||
/* reserve 0 for non-seqno */
|
||||
if (unlikely(dev_priv->next_seqno == 0)) {
|
||||
int ret;
|
||||
|
||||
ret = i915_gem_init_seqno(dev_priv, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
dev_priv->next_seqno = 1;
|
||||
}
|
||||
|
||||
*seqno = dev_priv->next_seqno++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_request_alloc - allocate a request structure
|
||||
*
|
||||
* @engine: engine that we wish to issue the request on.
|
||||
* @ctx: context that the request will be associated with.
|
||||
* This can be NULL if the request is not directly related to
|
||||
* any specific user context, in which case this function will
|
||||
* choose an appropriate context to use.
|
||||
*
|
||||
* Returns a pointer to the allocated request if successful,
|
||||
* or an error code if not.
|
||||
*/
|
||||
struct drm_i915_gem_request *
|
||||
i915_gem_request_alloc(struct intel_engine_cs *engine,
|
||||
struct i915_gem_context *ctx)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
|
||||
struct drm_i915_gem_request *req;
|
||||
u32 seqno;
|
||||
int ret;
|
||||
|
||||
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
|
||||
* EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
|
||||
* and restart.
|
||||
*/
|
||||
ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
/* Move the oldest request to the slab-cache (if not in use!) */
|
||||
req = list_first_entry_or_null(&engine->request_list,
|
||||
typeof(*req), link);
|
||||
if (req && i915_gem_request_completed(req))
|
||||
i915_gem_request_retire(req);
|
||||
|
||||
req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
|
||||
if (!req)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ret = i915_gem_get_seqno(dev_priv, &seqno);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
spin_lock_init(&req->lock);
|
||||
fence_init(&req->fence,
|
||||
&i915_fence_ops,
|
||||
&req->lock,
|
||||
engine->fence_context,
|
||||
seqno);
|
||||
|
||||
INIT_LIST_HEAD(&req->active_list);
|
||||
req->i915 = dev_priv;
|
||||
req->engine = engine;
|
||||
req->ctx = i915_gem_context_get(ctx);
|
||||
|
||||
/*
|
||||
* Reserve space in the ring buffer for all the commands required to
|
||||
* eventually emit this request. This is to guarantee that the
|
||||
* i915_add_request() call can't fail. Note that the reserve may need
|
||||
* to be redone if the request is not actually submitted straight
|
||||
* away, e.g. because a GPU scheduler has deferred it.
|
||||
*/
|
||||
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
|
||||
|
||||
if (i915.enable_execlists)
|
||||
ret = intel_logical_ring_alloc_request_extras(req);
|
||||
else
|
||||
ret = intel_ring_alloc_request_extras(req);
|
||||
if (ret)
|
||||
goto err_ctx;
|
||||
|
||||
return req;
|
||||
|
||||
err_ctx:
|
||||
i915_gem_context_put(ctx);
|
||||
err:
|
||||
kmem_cache_free(dev_priv->requests, req);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
|
||||
dev_priv->gt.active_engines |= intel_engine_flag(engine);
|
||||
if (dev_priv->gt.awake)
|
||||
return;
|
||||
|
||||
intel_runtime_pm_get_noresume(dev_priv);
|
||||
dev_priv->gt.awake = true;
|
||||
|
||||
intel_enable_gt_powersave(dev_priv);
|
||||
i915_update_gfx_val(dev_priv);
|
||||
if (INTEL_GEN(dev_priv) >= 6)
|
||||
gen6_rps_busy(dev_priv);
|
||||
|
||||
queue_delayed_work(dev_priv->wq,
|
||||
&dev_priv->gt.retire_work,
|
||||
round_jiffies_up_relative(HZ));
|
||||
}
|
||||
|
||||
/*
|
||||
* NB: This function is not allowed to fail. Doing so would mean the
|
||||
* request is not being tracked for completion but the work itself is
|
||||
* going to happen on the hardware. This would be a Bad Thing(tm).
|
||||
*/
|
||||
void __i915_add_request(struct drm_i915_gem_request *request,
|
||||
struct drm_i915_gem_object *obj,
|
||||
bool flush_caches)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
struct intel_ring *ring;
|
||||
u32 request_start;
|
||||
u32 reserved_tail;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(!request))
|
||||
return;
|
||||
|
||||
engine = request->engine;
|
||||
ring = request->ring;
|
||||
|
||||
/*
|
||||
* To ensure that this call will not fail, space for its emissions
|
||||
* should already have been reserved in the ring buffer. Let the ring
|
||||
* know that it is time to use that space up.
|
||||
*/
|
||||
request_start = ring->tail;
|
||||
reserved_tail = request->reserved_space;
|
||||
request->reserved_space = 0;
|
||||
|
||||
/*
|
||||
* Emit any outstanding flushes - execbuf can fail to emit the flush
|
||||
* after having emitted the batchbuffer command. Hence we need to fix
|
||||
* things up similar to emitting the lazy request. The difference here
|
||||
* is that the flush _must_ happen before the next request, no matter
|
||||
* what.
|
||||
*/
|
||||
if (flush_caches) {
|
||||
ret = engine->emit_flush(request, EMIT_FLUSH);
|
||||
|
||||
/* Not allowed to fail! */
|
||||
WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
|
||||
}
|
||||
|
||||
trace_i915_gem_request_add(request);
|
||||
|
||||
request->head = request_start;
|
||||
|
||||
/* Whilst this request exists, batch_obj will be on the
|
||||
* active_list, and so will hold the active reference. Only when this
|
||||
* request is retired will the batch_obj be moved onto the
|
||||
* inactive_list and lose its active reference. Hence we do not need
|
||||
* to explicitly hold another reference here.
|
||||
*/
|
||||
request->batch_obj = obj;
|
||||
|
||||
/* Seal the request and mark it as pending execution. Note that
|
||||
* we may inspect this state, without holding any locks, during
|
||||
* hangcheck. Hence we apply the barrier to ensure that we do not
|
||||
* see a more recent value in the hws than we are tracking.
|
||||
*/
|
||||
request->emitted_jiffies = jiffies;
|
||||
request->previous_seqno = engine->last_submitted_seqno;
|
||||
engine->last_submitted_seqno = request->fence.seqno;
|
||||
i915_gem_active_set(&engine->last_request, request);
|
||||
list_add_tail(&request->link, &engine->request_list);
|
||||
list_add_tail(&request->ring_link, &ring->request_list);
|
||||
|
||||
/* Record the position of the start of the request so that
|
||||
* should we detect the updated seqno part-way through the
|
||||
* GPU processing the request, we never over-estimate the
|
||||
* position of the head.
|
||||
*/
|
||||
request->postfix = ring->tail;
|
||||
|
||||
/* Not allowed to fail! */
|
||||
ret = engine->emit_request(request);
|
||||
WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
|
||||
|
||||
/* Sanity check that the reserved size was large enough. */
|
||||
ret = ring->tail - request_start;
|
||||
if (ret < 0)
|
||||
ret += ring->size;
|
||||
WARN_ONCE(ret > reserved_tail,
|
||||
"Not enough space reserved (%d bytes) "
|
||||
"for adding the request (%d bytes)\n",
|
||||
reserved_tail, ret);
|
||||
|
||||
i915_gem_mark_busy(engine);
|
||||
engine->submit_request(request);
|
||||
}
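/*
 * Stand-alone sketch, not part of this patch: the wraparound-safe
 * "bytes emitted since request_start" computation used in the sanity
 * check above, reduced to a self-contained helper for a circular buffer.
 */
#include <assert.h>

static int ring_bytes_used(int start, int tail, int size)
{
	int used = tail - start;

	if (used < 0)
		used += size;	/* the tail wrapped past the end of the ring */
	return used;
}

int main(void)
{
	assert(ring_bytes_used(100, 160, 4096) == 60);
	assert(ring_bytes_used(4000, 32, 4096) == 128);	/* wrapped case */
	return 0;
}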
|
||||
|
||||
static unsigned long local_clock_us(unsigned int *cpu)
|
||||
{
|
||||
unsigned long t;
|
||||
|
||||
/* Cheaply and approximately convert from nanoseconds to microseconds.
|
||||
* The result and subsequent calculations are also defined in the same
|
||||
* approximate microseconds units. The principal source of timing
|
||||
* error here is from the simple truncation.
|
||||
*
|
||||
* Note that local_clock() is only defined wrt to the current CPU;
|
||||
* the comparisons are no longer valid if we switch CPUs. Instead of
|
||||
* blocking preemption for the entire busywait, we can detect the CPU
|
||||
* switch and use that as indicator of system load and a reason to
|
||||
* stop busywaiting, see busywait_stop().
|
||||
*/
|
||||
*cpu = get_cpu();
|
||||
t = local_clock() >> 10;
|
||||
put_cpu();
|
||||
|
||||
return t;
|
||||
}
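/*
 * Stand-alone sketch, not part of this patch: the cheap ns->us
 * approximation above divides by 1024 with a shift instead of by 1000,
 * accepting a few percent of error to avoid a 64-bit division.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t approx_us(uint64_t ns)
{
	return ns >> 10;	/* ~ns / 1024, a slight underestimate of us */
}

int main(void)
{
	uint64_t ns = 5000000;	/* 5 ms */

	printf("exact %llu us, approx %llu us\n",
	       (unsigned long long)(ns / 1000),
	       (unsigned long long)approx_us(ns));
	return 0;
}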
|
||||
|
||||
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
|
||||
{
|
||||
unsigned int this_cpu;
|
||||
|
||||
if (time_after(local_clock_us(&this_cpu), timeout))
|
||||
return true;
|
||||
|
||||
return this_cpu != cpu;
|
||||
}
|
||||
|
||||
bool __i915_spin_request(const struct drm_i915_gem_request *req,
|
||||
int state, unsigned long timeout_us)
|
||||
{
|
||||
unsigned int cpu;
|
||||
|
||||
/* When waiting for high frequency requests, e.g. during synchronous
|
||||
* rendering split between the CPU and GPU, the finite amount of time
|
||||
* required to set up the irq and wait upon it limits the response
|
||||
* rate. By busywaiting on the request completion for a short while we
|
||||
* can service the high frequency waits as quick as possible. However,
|
||||
* if it is a slow request, we want to sleep as quickly as possible.
|
||||
* The tradeoff between waiting and sleeping is roughly the time it
|
||||
* takes to sleep on a request, on the order of a microsecond.
|
||||
*/
|
||||
|
||||
timeout_us += local_clock_us(&cpu);
|
||||
do {
|
||||
if (i915_gem_request_completed(req))
|
||||
return true;
|
||||
|
||||
if (signal_pending_state(state, current))
|
||||
break;
|
||||
|
||||
if (busywait_stop(timeout_us, cpu))
|
||||
break;
|
||||
|
||||
cpu_relax_lowlatency();
|
||||
} while (!need_resched());
|
||||
|
||||
return false;
|
||||
}
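/*
 * Stand-alone user-space sketch, not part of this patch: a model of the
 * bounded optimistic spin above - poll a completion flag for at most
 * timeout_us microseconds, then report failure so the caller can fall
 * back to a sleeping wait.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000ull + ts.tv_nsec / 1000;
}

static bool spin_wait(atomic_bool *done, unsigned long timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	do {
		if (atomic_load(done))
			return true;	/* completed while spinning */
	} while (now_us() < deadline);

	return false;			/* give up and let the caller sleep */
}

int main(void)
{
	atomic_bool done = false;

	return spin_wait(&done, 5) ? 1 : 0;	/* times out after ~5us */
}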
|
||||
|
||||
/**
|
||||
* i915_wait_request - wait until execution of request has finished
|
||||
* @req: the request to wait upon
|
||||
* @interruptible: do an interruptible wait (normally yes)
|
||||
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
|
||||
* @rps: client to charge for RPS boosting
|
||||
*
|
||||
* Note: It is of utmost importance that the passed in seqno and reset_counter
|
||||
* values have been read by the caller in an smp safe manner. Where read-side
|
||||
* locks are involved, it is sufficient to read the reset_counter before
|
||||
* unlocking the lock that protects the seqno. For lockless tricks, the
|
||||
* reset_counter _must_ be read before, and an appropriate smp_rmb must be
|
||||
* inserted.
|
||||
*
|
||||
* Returns 0 if the request was found within the allotted time. Else returns the
|
||||
* errno with remaining time filled in timeout argument.
|
||||
*/
|
||||
int i915_wait_request(struct drm_i915_gem_request *req,
|
||||
bool interruptible,
|
||||
s64 *timeout,
|
||||
struct intel_rps_client *rps)
|
||||
{
|
||||
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
|
||||
DEFINE_WAIT(reset);
|
||||
struct intel_wait wait;
|
||||
unsigned long timeout_remain;
|
||||
int ret = 0;
|
||||
|
||||
might_sleep();
|
||||
|
||||
if (i915_gem_request_completed(req))
|
||||
return 0;
|
||||
|
||||
timeout_remain = MAX_SCHEDULE_TIMEOUT;
|
||||
if (timeout) {
|
||||
if (WARN_ON(*timeout < 0))
|
||||
return -EINVAL;
|
||||
|
||||
if (*timeout == 0)
|
||||
return -ETIME;
|
||||
|
||||
/* Record current time in case interrupted, or wedged */
|
||||
timeout_remain = nsecs_to_jiffies_timeout(*timeout);
|
||||
*timeout += ktime_get_raw_ns();
|
||||
}
|
||||
|
||||
trace_i915_gem_request_wait_begin(req);
|
||||
|
||||
/* This client is about to stall waiting for the GPU. In many cases
|
||||
* this is undesirable and limits the throughput of the system, as
|
||||
* many clients cannot continue processing user input/output whilst
|
||||
* blocked. RPS autotuning may take tens of milliseconds to respond
|
||||
* to the GPU load and thus incurs additional latency for the client.
|
||||
* We can circumvent that by promoting the GPU frequency to maximum
|
||||
* before we wait. This makes the GPU throttle up much more quickly
|
||||
* (good for benchmarks and user experience, e.g. window animations),
|
||||
* but at a cost of spending more power processing the workload
|
||||
* (bad for battery). Not all clients even want their results
|
||||
* immediately and for them we should just let the GPU select its own
|
||||
* frequency to maximise efficiency. To prevent a single client from
|
||||
* forcing the clocks too high for the whole system, we only allow
|
||||
* each client to waitboost once in a busy period.
|
||||
*/
|
||||
if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
|
||||
gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
|
||||
|
||||
/* Optimistic short spin before touching IRQs */
|
||||
if (i915_spin_request(req, state, 5))
|
||||
goto complete;
|
||||
|
||||
set_current_state(state);
|
||||
add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
|
||||
|
||||
intel_wait_init(&wait, req->fence.seqno);
|
||||
if (intel_engine_add_wait(req->engine, &wait))
|
||||
/* In order to check that we haven't missed the interrupt
|
||||
* as we enabled it, we need to kick ourselves to do a
|
||||
* coherent check on the seqno before we sleep.
|
||||
*/
|
||||
goto wakeup;
|
||||
|
||||
for (;;) {
|
||||
if (signal_pending_state(state, current)) {
|
||||
ret = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
|
||||
timeout_remain = io_schedule_timeout(timeout_remain);
|
||||
if (timeout_remain == 0) {
|
||||
ret = -ETIME;
|
||||
break;
|
||||
}
|
||||
|
||||
if (intel_wait_complete(&wait))
|
||||
break;
|
||||
|
||||
set_current_state(state);
|
||||
|
||||
wakeup:
|
||||
/* Carefully check if the request is complete, giving time
|
||||
* for the seqno to be visible following the interrupt.
|
||||
* We also have to check in case we are kicked by the GPU
|
||||
* reset in order to drop the struct_mutex.
|
||||
*/
|
||||
if (__i915_request_irq_complete(req))
|
||||
break;
|
||||
|
||||
/* Only spin if we know the GPU is processing this request */
|
||||
if (i915_spin_request(req, state, 2))
|
||||
break;
|
||||
}
|
||||
remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
|
||||
|
||||
intel_engine_remove_wait(req->engine, &wait);
|
||||
__set_current_state(TASK_RUNNING);
|
||||
complete:
|
||||
trace_i915_gem_request_wait_end(req);
|
||||
|
||||
if (timeout) {
|
||||
*timeout -= ktime_get_raw_ns();
|
||||
if (*timeout < 0)
|
||||
*timeout = 0;
|
||||
|
||||
/*
|
||||
* Apparently ktime isn't accurate enough and occasionally has a
|
||||
* bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
|
||||
* things up to make the test happy. We allow up to 1 jiffy.
|
||||
*
|
||||
* This is a regression from the timespec->ktime conversion.
|
||||
*/
|
||||
if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
|
||||
*timeout = 0;
|
||||
}
|
||||
|
||||
if (IS_RPS_USER(rps) &&
|
||||
req->fence.seqno == req->engine->last_submitted_seqno) {
|
||||
/* The GPU is now idle and this client has stalled.
|
||||
* Since no other client has submitted a request in the
|
||||
* meantime, assume that this client is the only one
|
||||
* supplying work to the GPU but is unable to keep that
|
||||
* work supplied because it is waiting. Since the GPU is
|
||||
* then never kept fully busy, RPS autoclocking will
|
||||
* keep the clocks relatively low, causing further delays.
|
||||
* Compensate by giving the synchronous client credit for
|
||||
* a waitboost next time.
|
||||
*/
|
||||
spin_lock(&req->i915->rps.client_lock);
|
||||
list_del_init(&rps->link);
|
||||
spin_unlock(&req->i915->rps.client_lock);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void engine_retire_requests(struct intel_engine_cs *engine)
|
||||
{
|
||||
struct drm_i915_gem_request *request, *next;
|
||||
|
||||
list_for_each_entry_safe(request, next, &engine->request_list, link) {
|
||||
if (!i915_gem_request_completed(request))
|
||||
break;
|
||||
|
||||
i915_gem_request_retire(request);
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
|
||||
if (dev_priv->gt.active_engines == 0)
|
||||
return;
|
||||
|
||||
GEM_BUG_ON(!dev_priv->gt.awake);
|
||||
|
||||
for_each_engine(engine, dev_priv) {
|
||||
engine_retire_requests(engine);
|
||||
if (!intel_engine_is_active(engine))
|
||||
dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
|
||||
}
|
||||
|
||||
if (dev_priv->gt.active_engines == 0)
|
||||
queue_delayed_work(dev_priv->wq,
|
||||
&dev_priv->gt.idle_work,
|
||||
msecs_to_jiffies(100));
|
||||
}
|
676
drivers/gpu/drm/i915/i915_gem_request.h
Normal file
|
@ -0,0 +1,676 @@
|
|||
/*
|
||||
* Copyright © 2008-2015 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef I915_GEM_REQUEST_H
|
||||
#define I915_GEM_REQUEST_H
|
||||
|
||||
#include <linux/fence.h>
|
||||
|
||||
#include "i915_gem.h"
|
||||
|
||||
struct intel_wait {
|
||||
struct rb_node node;
|
||||
struct task_struct *tsk;
|
||||
u32 seqno;
|
||||
};
|
||||
|
||||
struct intel_signal_node {
|
||||
struct rb_node node;
|
||||
struct intel_wait wait;
|
||||
};
|
||||
|
||||
/**
|
||||
* Request queue structure.
|
||||
*
|
||||
* The request queue allows us to note sequence numbers that have been emitted
|
||||
* and may be associated with active buffers to be retired.
|
||||
*
|
||||
* By keeping this list, we can avoid having to do questionable sequence
|
||||
* number comparisons on buffer last_read|write_seqno. It also allows an
|
||||
* emission time to be associated with the request for tracking how far ahead
|
||||
* of the GPU the submission is.
|
||||
*
|
||||
* The requests are reference counted.
|
||||
*/
|
||||
struct drm_i915_gem_request {
|
||||
struct fence fence;
|
||||
spinlock_t lock;
|
||||
|
||||
/** On which ring this request was generated */
|
||||
struct drm_i915_private *i915;
|
||||
|
||||
/**
|
||||
* Context and ring buffer related to this request
|
||||
* Contexts are refcounted, so when this request is associated with a
|
||||
* context, we must increment the context's refcount, to guarantee that
|
||||
* it persists while any request is linked to it. Requests themselves
|
||||
* are also refcounted, so the request will only be freed when the last
|
||||
* reference to it is dismissed, and the code in
|
||||
* i915_gem_request_free() will then decrement the refcount on the
|
||||
* context.
|
||||
*/
|
||||
struct i915_gem_context *ctx;
|
||||
struct intel_engine_cs *engine;
|
||||
struct intel_ring *ring;
|
||||
struct intel_signal_node signaling;
|
||||
|
||||
/** GEM sequence number associated with the previous request,
|
||||
* when the HWS breadcrumb is equal to this the GPU is processing
|
||||
* this request.
|
||||
*/
|
||||
u32 previous_seqno;
|
||||
|
||||
/** Position in the ringbuffer of the start of the request */
|
||||
u32 head;
|
||||
|
||||
/**
|
||||
* Position in the ringbuffer of the start of the postfix.
|
||||
* This is required to calculate the maximum available ringbuffer
|
||||
* space without overwriting the postfix.
|
||||
*/
|
||||
u32 postfix;
|
||||
|
||||
/** Position in the ringbuffer of the end of the whole request */
|
||||
u32 tail;
|
||||
|
||||
/** Preallocate space in the ringbuffer for emitting the request */
|
||||
u32 reserved_space;
|
||||
|
||||
/**
|
||||
* Context related to the previous request.
|
||||
* As the contexts are accessed by the hardware until the switch is
|
||||
* completed to a new context, the hardware may still be writing
|
||||
* to the context object after the breadcrumb is visible. We must
|
||||
* not unpin/unbind/prune that object whilst still active and so
|
||||
* we keep the previous context pinned until the following (this)
|
||||
* request is retired.
|
||||
*/
|
||||
struct i915_gem_context *previous_context;
|
||||
|
||||
/** Batch buffer related to this request if any (used for
|
||||
* error state dump only).
|
||||
*/
|
||||
struct drm_i915_gem_object *batch_obj;
|
||||
struct list_head active_list;
|
||||
|
||||
/** Time at which this request was emitted, in jiffies. */
|
||||
unsigned long emitted_jiffies;
|
||||
|
||||
/** engine->request_list entry for this request */
|
||||
struct list_head link;
|
||||
|
||||
/** ring->request_list entry for this request */
|
||||
struct list_head ring_link;
|
||||
|
||||
struct drm_i915_file_private *file_priv;
|
||||
/** file_priv list entry for this request */
|
||||
struct list_head client_list;
|
||||
|
||||
/** process identifier submitting this request */
|
||||
struct pid *pid;
|
||||
|
||||
/**
|
||||
* The ELSP only accepts two elements at a time, so we queue
|
||||
* context/tail pairs on a given queue (ring->execlist_queue) until the
|
||||
* hardware is available. The queue serves a double purpose: we also use
|
||||
* it to keep track of the up to 2 contexts currently in the hardware
|
||||
* (usually one in execution and the other queued up by the GPU): We
|
||||
* only remove elements from the head of the queue when the hardware
|
||||
* informs us that an element has been completed.
|
||||
*
|
||||
* All accesses to the queue are mediated by a spinlock
|
||||
* (ring->execlist_lock).
|
||||
*/
|
||||
|
||||
/** Execlist link in the submission queue.*/
|
||||
struct list_head execlist_link;
|
||||
|
||||
/** Execlists no. of times this request has been sent to the ELSP */
|
||||
int elsp_submitted;
|
||||
|
||||
/** Execlists context hardware id. */
|
||||
unsigned int ctx_hw_id;
|
||||
};
|
||||
|
||||
extern const struct fence_ops i915_fence_ops;
|
||||
|
||||
static inline bool fence_is_i915(struct fence *fence)
|
||||
{
|
||||
return fence->ops == &i915_fence_ops;
|
||||
}
|
||||
|
||||
struct drm_i915_gem_request * __must_check
|
||||
i915_gem_request_alloc(struct intel_engine_cs *engine,
|
||||
struct i915_gem_context *ctx);
|
||||
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
|
||||
struct drm_file *file);
|
||||
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
|
||||
|
||||
static inline u32
|
||||
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
|
||||
{
|
||||
return req ? req->fence.seqno : 0;
|
||||
}
|
||||
|
||||
static inline struct intel_engine_cs *
|
||||
i915_gem_request_get_engine(struct drm_i915_gem_request *req)
|
||||
{
|
||||
return req ? req->engine : NULL;
|
||||
}
|
||||
|
||||
static inline struct drm_i915_gem_request *
|
||||
to_request(struct fence *fence)
|
||||
{
|
||||
/* We assume that NULL fence/request are interoperable */
|
||||
BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
|
||||
GEM_BUG_ON(fence && !fence_is_i915(fence));
|
||||
return container_of(fence, struct drm_i915_gem_request, fence);
|
||||
}
|
||||
|
||||
static inline struct drm_i915_gem_request *
|
||||
i915_gem_request_get(struct drm_i915_gem_request *req)
|
||||
{
|
||||
return to_request(fence_get(&req->fence));
|
||||
}
|
||||
|
||||
static inline struct drm_i915_gem_request *
|
||||
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
|
||||
{
|
||||
return to_request(fence_get_rcu(&req->fence));
|
||||
}
|
||||
|
||||
static inline void
|
||||
i915_gem_request_put(struct drm_i915_gem_request *req)
|
||||
{
|
||||
fence_put(&req->fence);
|
||||
}
|
||||
|
||||
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
|
||||
struct drm_i915_gem_request *src)
|
||||
{
|
||||
if (src)
|
||||
i915_gem_request_get(src);
|
||||
|
||||
if (*pdst)
|
||||
i915_gem_request_put(*pdst);
|
||||
|
||||
*pdst = src;
|
||||
}
|
||||
|
||||
void __i915_add_request(struct drm_i915_gem_request *req,
|
||||
struct drm_i915_gem_object *batch_obj,
|
||||
bool flush_caches);
|
||||
#define i915_add_request(req) \
|
||||
__i915_add_request(req, NULL, true)
|
||||
#define i915_add_request_no_flush(req) \
|
||||
__i915_add_request(req, NULL, false)
|
||||
|
||||
struct intel_rps_client;
|
||||
#define NO_WAITBOOST ERR_PTR(-1)
|
||||
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
|
||||
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
|
||||
|
||||
int i915_wait_request(struct drm_i915_gem_request *req,
|
||||
bool interruptible,
|
||||
s64 *timeout,
|
||||
struct intel_rps_client *rps)
|
||||
__attribute__((nonnull(1)));
|
||||
|
||||
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
|
||||
|
||||
/**
|
||||
* Returns true if seq1 is later than or equal to seq2.
|
||||
*/
|
||||
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
|
||||
{
|
||||
return (s32)(seq1 - seq2) >= 0;
|
||||
}
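/*
 * Stand-alone sketch, not part of this patch: why the signed-difference
 * trick above survives 32-bit wraparound - 0x00000001 is treated as being
 * after 0xffffffff even though it is numerically smaller.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(0x00000001, 0xffffffff));	/* just after the wrap */
	assert(!seqno_passed(0xffffffff, 0x00000001));
	assert(seqno_passed(5, 5));			/* equal counts as passed */
	return 0;
}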
|
||||
|
||||
static inline bool
|
||||
i915_gem_request_started(const struct drm_i915_gem_request *req)
|
||||
{
|
||||
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
|
||||
req->previous_seqno);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
i915_gem_request_completed(const struct drm_i915_gem_request *req)
|
||||
{
|
||||
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
|
||||
req->fence.seqno);
|
||||
}
|
||||
|
||||
bool __i915_spin_request(const struct drm_i915_gem_request *request,
|
||||
int state, unsigned long timeout_us);
|
||||
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
|
||||
int state, unsigned long timeout_us)
|
||||
{
|
||||
return (i915_gem_request_started(request) &&
|
||||
__i915_spin_request(request, state, timeout_us));
|
||||
}
|
||||
|
||||
/* We treat requests as fences. This is not to be confused with our
|
||||
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
|
||||
* We use the fences to synchronize access from the CPU with activity on the
|
||||
* GPU, for example, we should not rewrite an object's PTE whilst the GPU
|
||||
* is reading them. We also track fences at a higher level to provide
|
||||
* implicit synchronisation around GEM objects, e.g. set-domain will wait
|
||||
* for outstanding GPU rendering before marking the object ready for CPU
|
||||
* access, or a pageflip will wait until the GPU is complete before showing
|
||||
* the frame on the scanout.
|
||||
*
|
||||
* In order to use a fence, the object must track the fence it needs to
|
||||
* serialise with. For example, GEM objects want to track both read and
|
||||
* write access so that we can perform concurrent read operations between
|
||||
* the CPU and GPU engines, as well as waiting for all rendering to
|
||||
* complete, or waiting for the last GPU user of a "fence register". The
|
||||
* object then embeds a #i915_gem_active to track the most recent (in
|
||||
* retirement order) request relevant for the desired mode of access.
|
||||
* The #i915_gem_active is updated with i915_gem_active_set() to track the
|
||||
* most recent fence request, typically this is done as part of
|
||||
* i915_vma_move_to_active().
|
||||
*
|
||||
* When the #i915_gem_active completes (is retired), it will
|
||||
* signal its completion to the owner through a callback as well as mark
|
||||
* itself as idle (i915_gem_active.request == NULL). The owner
|
||||
* can then perform any action, such as delayed freeing of an active
|
||||
* resource including itself.
|
||||
*/
|
||||
struct i915_gem_active;
|
||||
|
||||
typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
|
||||
struct drm_i915_gem_request *);
|
||||
|
||||
struct i915_gem_active {
|
||||
struct drm_i915_gem_request __rcu *request;
|
||||
struct list_head link;
|
||||
i915_gem_retire_fn retire;
|
||||
};
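/*
 * Stand-alone sketch, not part of this patch: a greatly simplified,
 * single-threaded model of the tracker described above - a slot remembers
 * the most recent request and runs an optional retire callback once that
 * request has completed.
 */
#include <stdio.h>

struct request { unsigned int seqno; };

struct tracker;
typedef void (*retire_fn)(struct tracker *, struct request *);

struct tracker {
	struct request *request;
	retire_fn retire;
};

static void on_retire(struct tracker *t, struct request *rq)
{
	printf("request %u retired\n", rq->seqno);
}

static void tracker_set(struct tracker *t, struct request *rq)
{
	t->request = rq;
}

static void tracker_retire(struct tracker *t, unsigned int completed)
{
	struct request *rq = t->request;

	if (rq && rq->seqno <= completed) {
		t->request = NULL;	/* mark the tracker idle */
		if (t->retire)
			t->retire(t, rq);
	}
}

int main(void)
{
	struct request rq = { .seqno = 42 };
	struct tracker t = { .request = NULL, .retire = on_retire };

	tracker_set(&t, &rq);
	tracker_retire(&t, 41);	/* not yet complete: nothing happens */
	tracker_retire(&t, 42);	/* complete: callback runs, slot goes idle */
	return 0;
}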
|
||||
|
||||
void i915_gem_retire_noop(struct i915_gem_active *,
|
||||
struct drm_i915_gem_request *request);
|
||||
|
||||
/**
|
||||
* init_request_active - prepares the activity tracker for use
|
||||
* @active - the active tracker
|
||||
* @func - a callback invoked when the tracker is retired (becomes idle),
|
||||
* can be NULL
|
||||
*
|
||||
* init_request_active() prepares the embedded @active struct for use as
|
||||
* an activity tracker, that is for tracking the last known active request
|
||||
* associated with it. When the last request becomes idle, when it is retired
|
||||
* after completion, the optional callback @func is invoked.
|
||||
*/
|
||||
static inline void
|
||||
init_request_active(struct i915_gem_active *active,
|
||||
i915_gem_retire_fn retire)
|
||||
{
|
||||
INIT_LIST_HEAD(&active->link);
|
||||
active->retire = retire ?: i915_gem_retire_noop;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_active_set - updates the tracker to watch the current request
|
||||
* @active - the active tracker
|
||||
* @request - the request to watch
|
||||
*
|
||||
* i915_gem_active_set() watches the given @request for completion. Whilst
|
||||
* that @request is busy, the @active reports busy. When that @request is
|
||||
* retired, the @active tracker is updated to report idle.
|
||||
*/
|
||||
static inline void
|
||||
i915_gem_active_set(struct i915_gem_active *active,
|
||||
struct drm_i915_gem_request *request)
|
||||
{
|
||||
list_move(&active->link, &request->active_list);
|
||||
rcu_assign_pointer(active->request, request);
|
||||
}
|
||||
|
||||
static inline struct drm_i915_gem_request *
|
||||
__i915_gem_active_peek(const struct i915_gem_active *active)
|
||||
{
|
||||
/* Inside the error capture (running with the driver in an unknown
|
||||
* state), we want to bend the rules slightly (a lot).
|
||||
*
|
||||
* Work is in progress to make it safer, in the meantime this keeps
|
||||
* the known issue from spamming the logs.
|
||||
*/
|
||||
return rcu_dereference_protected(active->request, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_active_peek - report the active request being monitored
|
||||
* @active - the active tracker
|
||||
*
|
||||
* i915_gem_active_peek() returns the current request being tracked if
|
||||
* still active, or NULL. It does not obtain a reference on the request
|
||||
* for the caller, so the caller must hold struct_mutex.
|
||||
*/
|
||||
static inline struct drm_i915_gem_request *
|
||||
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
|
||||
{
|
||||
struct drm_i915_gem_request *request;
|
||||
|
||||
request = rcu_dereference_protected(active->request,
|
||||
lockdep_is_held(mutex));
|
||||
if (!request || i915_gem_request_completed(request))
|
||||
return NULL;
|
||||
|
||||
return request;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_active_peek_rcu - report the active request being monitored
|
||||
* @active - the active tracker
|
||||
*
|
||||
* i915_gem_active_peek_rcu() returns the current request being tracked if
|
||||
* still active, or NULL. It does not obtain a reference on the request
|
||||
* for the caller, and inspection of the request is only valid under
|
||||
* the RCU lock.
|
||||
*/
|
||||
static inline struct drm_i915_gem_request *
|
||||
i915_gem_active_peek_rcu(const struct i915_gem_active *active)
|
||||
{
|
||||
struct drm_i915_gem_request *request;
|
||||
|
||||
request = rcu_dereference(active->request);
|
||||
if (!request || i915_gem_request_completed(request))
|
||||
return NULL;
|
||||
|
||||
return request;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_active_get - return a reference to the active request
|
||||
* @active - the active tracker
|
||||
*
|
||||
* i915_gem_active_get() returns a reference to the active request, or NULL
|
||||
* if the active tracker is idle. The caller must hold struct_mutex.
|
||||
*/
|
||||
static inline struct drm_i915_gem_request *
|
||||
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
|
||||
{
|
||||
return i915_gem_request_get(i915_gem_active_peek(active, mutex));
|
||||
}
|
||||
|
||||
/**
|
||||
* __i915_gem_active_get_rcu - return a reference to the active request
|
||||
* @active - the active tracker
|
||||
*
|
||||
* __i915_gem_active_get() returns a reference to the active request, or NULL
|
||||
* if the active tracker is idle. The caller must hold the RCU read lock, but
|
||||
* the returned pointer is safe to use outside of RCU.
|
||||
*/
|
||||
static inline struct drm_i915_gem_request *
|
||||
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
|
||||
{
|
||||
/* Performing a lockless retrieval of the active request is super
|
||||
* tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
|
||||
* slab of request objects will not be freed whilst we hold the
|
||||
* RCU read lock. It does not guarantee that the request itself
|
||||
* will not be freed and then *reused*. Viz,
|
||||
*
|
||||
* Thread A Thread B
|
||||
*
|
||||
* req = active.request
|
||||
* retire(req) -> free(req);
|
||||
* (req is now first on the slab freelist)
|
||||
* active.request = NULL
|
||||
*
|
||||
* req = new submission on a new object
|
||||
* ref(req)
|
||||
*
|
||||
* To prevent the request from being reused whilst the caller
|
||||
* uses it, we take a reference like normal. Whilst acquiring
|
||||
* the reference we check that it is not in a destroyed state
|
||||
* (refcnt == 0). That prevents the request being reallocated
|
||||
* whilst the caller holds on to it. To check that the request
|
||||
* was not reallocated as we acquired the reference we have to
|
||||
* check that our request remains the active request across
|
||||
* the lookup, in the same manner as a seqlock. The visibility
|
||||
* of the pointer versus the reference counting is controlled
|
||||
* by using RCU barriers (rcu_dereference and rcu_assign_pointer).
|
||||
*
|
||||
* In the middle of all that, we inspect whether the request is
|
||||
* complete. Retiring is lazy so the request may be completed long
|
||||
* before the active tracker is updated. Querying whether the
|
||||
* request is complete is far cheaper (as it involves no locked
|
||||
* instructions setting cachelines to exclusive) than acquiring
|
||||
* the reference, so we do it first. The RCU read lock ensures the
|
||||
* pointer dereference is valid, but does not ensure that the
|
||||
* seqno nor HWS is the right one! However, if the request was
|
||||
* reallocated, that means the active tracker's request was complete.
|
||||
* If the new request is also complete, then both are and we can
|
||||
* just report the active tracker is idle. If the new request is
|
||||
* incomplete, then we acquire a reference on it and check that
|
||||
* it remained the active request.
|
||||
*/
|
||||
do {
|
||||
struct drm_i915_gem_request *request;
|
||||
|
||||
request = rcu_dereference(active->request);
|
||||
if (!request || i915_gem_request_completed(request))
|
||||
return NULL;
|
||||
|
||||
request = i915_gem_request_get_rcu(request);
|
||||
|
||||
/* What stops the following rcu_access_pointer() from occurring
|
||||
* before the above i915_gem_request_get_rcu()? If we were
|
||||
* to read the value before pausing to get the reference to
|
||||
* the request, we may not notice a change in the active
|
||||
* tracker.
|
||||
*
|
||||
* The rcu_access_pointer() is a mere compiler barrier, which
|
||||
* means both the CPU and compiler are free to perform the
|
||||
* memory read without constraint. The compiler only has to
|
||||
* ensure that any operations after the rcu_access_pointer()
|
||||
* occur afterwards in program order. This means the read may
|
||||
* be performed earlier by an out-of-order CPU, or adventurous
|
||||
* compiler.
|
||||
*
|
||||
* The atomic operation at the heart of
|
||||
* i915_gem_request_get_rcu(), see fence_get_rcu(), is
|
||||
* atomic_inc_not_zero() which is only a full memory barrier
|
||||
* when successful. That is, if i915_gem_request_get_rcu()
|
||||
* returns the request (and so with the reference counted
|
||||
* incremented) then the following read for rcu_access_pointer()
|
||||
* must occur after the atomic operation and so confirm
|
||||
* that this request is the one currently being tracked.
|
||||
*/
|
||||
if (!request || request == rcu_access_pointer(active->request))
|
||||
return rcu_pointer_handoff(request);
|
||||
|
||||
i915_gem_request_put(request);
|
||||
} while (1);
|
||||
}
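/*
 * Stand-alone sketch, not part of this patch and not real RCU: a loose,
 * C11-atomics model of the "take a reference, then confirm the slot still
 * points at the same request" loop above. Real lockless use also relies on
 * RCU-deferred freeing of the request slab; only the control flow is shown.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct request {
	atomic_uint refcount;	/* 0 means the request is being destroyed */
};

static bool request_get_unless_zero(struct request *rq)
{
	unsigned int old = atomic_load(&rq->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&rq->refcount, &old, old + 1))
			return true;
	}
	return false;
}

static struct request *active_get(struct request *_Atomic *slot)
{
	for (;;) {
		struct request *rq = atomic_load(slot);

		if (!rq)
			return NULL;	/* the tracker is idle */

		if (!request_get_unless_zero(rq))
			continue;	/* raced with destruction, retry */

		/* Re-check: if the slot moved on while we were taking the
		 * reference, drop it and start again.
		 */
		if (rq == atomic_load(slot))
			return rq;

		atomic_fetch_sub(&rq->refcount, 1);
	}
}

int main(void)
{
	struct request rq = { .refcount = 1 };
	struct request *_Atomic slot = &rq;

	return active_get(&slot) ? 0 : 1;
}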
|
||||
|
||||
/**
|
||||
* i915_gem_active_get_unlocked - return a reference to the active request
|
||||
* @active - the active tracker
|
||||
*
|
||||
* i915_gem_active_get_unlocked() returns a reference to the active request,
|
||||
* or NULL if the active tracker is idle. The reference is obtained under RCU,
|
||||
* so no locking is required by the caller.
|
||||
*
|
||||
* The reference should be freed with i915_gem_request_put().
|
||||
*/
|
||||
static inline struct drm_i915_gem_request *
|
||||
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
|
||||
{
|
||||
struct drm_i915_gem_request *request;
|
||||
|
||||
rcu_read_lock();
|
||||
request = __i915_gem_active_get_rcu(active);
|
||||
rcu_read_unlock();
|
||||
|
||||
return request;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_active_isset - report whether the active tracker is assigned
|
||||
* @active - the active tracker
|
||||
*
|
||||
* i915_gem_active_isset() returns true if the active tracker is currently
|
||||
* assigned to a request. Due to the lazy retiring, that request may be idle
|
||||
* and this may report stale information.
|
||||
*/
|
||||
static inline bool
|
||||
i915_gem_active_isset(const struct i915_gem_active *active)
|
||||
{
|
||||
return rcu_access_pointer(active->request);
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_active_is_idle - report whether the active tracker is idle
|
||||
* @active - the active tracker
|
||||
*
|
||||
* i915_gem_active_is_idle() returns true if the active tracker is currently
|
||||
* unassigned or if the request is complete (but not yet retired). Requires
|
||||
* the caller to hold struct_mutex (but that can be relaxed if desired).
|
||||
*/
|
||||
static inline bool
|
||||
i915_gem_active_is_idle(const struct i915_gem_active *active,
|
||||
struct mutex *mutex)
|
||||
{
|
||||
return !i915_gem_active_peek(active, mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_active_wait - waits until the request is completed
|
||||
* @active - the active request on which to wait
|
||||
*
|
||||
* i915_gem_active_wait() waits until the request is completed before
|
||||
* returning. Note that it does not guarantee that the request is
|
||||
* retired first, see i915_gem_active_retire().
|
||||
*
|
||||
* i915_gem_active_wait() returns immediately if the active
|
||||
* request is already complete.
|
||||
*/
|
||||
static inline int __must_check
|
||||
i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
|
||||
{
|
||||
struct drm_i915_gem_request *request;
|
||||
|
||||
request = i915_gem_active_peek(active, mutex);
|
||||
if (!request)
|
||||
return 0;
|
||||
|
||||
return i915_wait_request(request, true, NULL, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_active_wait_unlocked - waits until the request is completed
|
||||
* @active - the active request on which to wait
|
||||
* @interruptible - whether the wait can be woken by a userspace signal
|
||||
* @timeout - how long to wait at most
|
||||
* @rps - userspace client to charge for a waitboost
|
||||
*
|
||||
* i915_gem_active_wait_unlocked() waits until the request is completed before
|
||||
* returning, without requiring any locks to be held. Note that it does not
|
||||
* retire any requests before returning.
|
||||
*
|
||||
* This function relies on RCU in order to acquire the reference to the active
|
||||
* request without holding any locks. See __i915_gem_active_get_rcu() for the
|
||||
* gory details on how that is managed. Once the reference is acquired, we
|
||||
* can then wait upon the request, and afterwards release our reference,
|
||||
* free of any locking.
|
||||
*
|
||||
* This function wraps i915_wait_request(), see it for the full details on
|
||||
* the arguments.
|
||||
*
|
||||
* Returns 0 if successful, or a negative error code.
|
||||
*/
|
||||
static inline int
|
||||
i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
|
||||
bool interruptible,
|
||||
s64 *timeout,
|
||||
struct intel_rps_client *rps)
|
||||
{
|
||||
struct drm_i915_gem_request *request;
|
||||
int ret = 0;
|
||||
|
||||
request = i915_gem_active_get_unlocked(active);
|
||||
if (request) {
|
||||
ret = i915_wait_request(request, interruptible, timeout, rps);
|
||||
i915_gem_request_put(request);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_active_retire - waits until the request is retired
|
||||
* @active - the active request on which to wait
|
||||
*
|
||||
* i915_gem_active_retire() waits until the request is completed,
|
||||
* and then ensures that at least the retirement handler for this
|
||||
* @active tracker is called before returning. If the @active
|
||||
* tracker is idle, the function returns immediately.
|
||||
*/
|
||||
static inline int __must_check
|
||||
i915_gem_active_retire(struct i915_gem_active *active,
|
||||
struct mutex *mutex)
|
||||
{
|
||||
struct drm_i915_gem_request *request;
|
||||
int ret;
|
||||
|
||||
request = rcu_dereference_protected(active->request,
|
||||
lockdep_is_held(mutex));
|
||||
if (!request)
|
||||
return 0;
|
||||
|
||||
ret = i915_wait_request(request, true, NULL, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
list_del_init(&active->link);
|
||||
RCU_INIT_POINTER(active->request, NULL);
|
||||
|
||||
active->retire(active, request);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Convenience functions for peeking at state inside active's request whilst
|
||||
* guarded by the struct_mutex.
|
||||
*/
|
||||
|
||||
static inline uint32_t
|
||||
i915_gem_active_get_seqno(const struct i915_gem_active *active,
|
||||
struct mutex *mutex)
|
||||
{
|
||||
return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
|
||||
}
|
||||
|
||||
static inline struct intel_engine_cs *
|
||||
i915_gem_active_get_engine(const struct i915_gem_active *active,
|
||||
struct mutex *mutex)
|
||||
{
|
||||
return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
|
||||
}
|
||||
|
||||
#define for_each_active(mask, idx) \
|
||||
for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
|
||||
|
||||
#endif /* I915_GEM_REQUEST_H */
|
|
@ -48,19 +48,15 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
|
|||
#endif
|
||||
}
|
||||
|
||||
static int num_vma_bound(struct drm_i915_gem_object *obj)
|
||||
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct i915_vma *vma;
|
||||
int count = 0;
|
||||
|
||||
list_for_each_entry(vma, &obj->vma_list, obj_link) {
|
||||
if (drm_mm_node_allocated(&vma->node))
|
||||
count++;
|
||||
if (vma->pin_count)
|
||||
count++;
|
||||
}
|
||||
list_for_each_entry(vma, &obj->vma_list, obj_link)
|
||||
if (i915_vma_is_pinned(vma))
|
||||
return true;
|
||||
|
||||
return count;
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool swap_available(void)
|
||||
|
@ -82,7 +78,10 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
|
|||
* to the GPU, simply unbinding from the GPU is not going to succeed
|
||||
* in releasing our pin count on the pages themselves.
|
||||
*/
|
||||
if (obj->pages_pin_count != num_vma_bound(obj))
|
||||
if (obj->pages_pin_count > obj->bind_count)
|
||||
return false;
|
||||
|
||||
if (any_vma_pinned(obj))
|
||||
return false;
|
||||
|
||||
/* We can only return physical pages to the system if we can either
|
||||
|
@ -163,17 +162,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
|||
*/
|
||||
for (phase = phases; phase->list; phase++) {
|
||||
struct list_head still_in_list;
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
if ((flags & phase->bit) == 0)
|
||||
continue;
|
||||
|
||||
INIT_LIST_HEAD(&still_in_list);
|
||||
while (count < target && !list_empty(phase->list)) {
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct i915_vma *vma, *v;
|
||||
|
||||
obj = list_first_entry(phase->list,
|
||||
typeof(*obj), global_list);
|
||||
while (count < target &&
|
||||
(obj = list_first_entry_or_null(phase->list,
|
||||
typeof(*obj),
|
||||
global_list))) {
|
||||
list_move_tail(&obj->global_list, &still_in_list);
|
||||
|
||||
if (flags & I915_SHRINK_PURGEABLE &&
|
||||
|
@ -184,24 +182,21 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
|||
!is_vmalloc_addr(obj->mapping))
|
||||
continue;
|
||||
|
||||
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
|
||||
if ((flags & I915_SHRINK_ACTIVE) == 0 &&
|
||||
i915_gem_object_is_active(obj))
|
||||
continue;
|
||||
|
||||
if (!can_release_pages(obj))
|
||||
continue;
|
||||
|
||||
drm_gem_object_reference(&obj->base);
|
||||
i915_gem_object_get(obj);
|
||||
|
||||
/* For the unbound phase, this should be a no-op! */
|
||||
list_for_each_entry_safe(vma, v,
|
||||
&obj->vma_list, obj_link)
|
||||
if (i915_vma_unbind(vma))
|
||||
break;
|
||||
|
||||
i915_gem_object_unbind(obj);
|
||||
if (i915_gem_object_put_pages(obj) == 0)
|
||||
count += obj->base.size >> PAGE_SHIFT;
|
||||
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
}
|
||||
list_splice(&still_in_list, phase->list);
|
||||
}
|
||||
|
@ -210,6 +205,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
|||
intel_runtime_pm_put(dev_priv);
|
||||
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
/* expedite the RCU grace period to free some request slabs */
|
||||
synchronize_rcu_expedited();
|
||||
|
||||
return count;
|
||||
}
|
||||
|
@ -230,10 +227,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
|||
*/
|
||||
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return i915_gem_shrink(dev_priv, -1UL,
|
||||
I915_SHRINK_BOUND |
|
||||
I915_SHRINK_UNBOUND |
|
||||
I915_SHRINK_ACTIVE);
|
||||
unsigned long freed;
|
||||
|
||||
freed = i915_gem_shrink(dev_priv, -1UL,
|
||||
I915_SHRINK_BOUND |
|
||||
I915_SHRINK_UNBOUND |
|
||||
I915_SHRINK_ACTIVE);
|
||||
rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
|
||||
|
||||
return freed;
|
||||
}
|
||||
|
||||
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
|
||||
|
@ -242,9 +244,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
|
|||
if (!mutex_is_locked_by(&dev->struct_mutex, current))
|
||||
return false;
|
||||
|
||||
if (to_i915(dev)->mm.shrinker_no_lock_stealing)
|
||||
return false;
|
||||
|
||||
*unlock = false;
|
||||
} else
|
||||
*unlock = true;
|
||||
|
@ -273,7 +272,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
|
|||
count += obj->base.size >> PAGE_SHIFT;
|
||||
|
||||
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
|
||||
if (!obj->active && can_release_pages(obj))
|
||||
if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
|
||||
count += obj->base.size >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
|
@ -321,17 +320,22 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
|
|||
struct shrinker_lock_uninterruptible *slu,
|
||||
int timeout_ms)
|
||||
{
|
||||
unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
|
||||
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
|
||||
|
||||
do {
|
||||
if (i915_gem_wait_for_idle(dev_priv, false) == 0 &&
|
||||
i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
|
||||
break;
|
||||
|
||||
while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
|
||||
schedule_timeout_killable(1);
|
||||
if (fatal_signal_pending(current))
|
||||
return false;
|
||||
if (--timeout == 0) {
|
||||
|
||||
if (time_after(jiffies, timeout)) {
|
||||
pr_err("Unable to lock GPU to purge memory.\n");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} while (1);
|
||||
|
||||
slu->was_interruptible = dev_priv->mm.interruptible;
|
||||
dev_priv->mm.interruptible = false;
|
||||
|
@ -410,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
|
|||
return NOTIFY_DONE;
|
||||
|
||||
/* Force everything onto the inactive lists */
|
||||
ret = i915_gem_wait_for_idle(dev_priv);
|
||||
ret = i915_gem_wait_for_idle(dev_priv, false);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
|
|
|
@ -698,24 +698,24 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
|
|||
*/
|
||||
vma->node.start = gtt_offset;
|
||||
vma->node.size = size;
|
||||
if (drm_mm_initialized(&ggtt->base.mm)) {
|
||||
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
|
||||
if (ret) {
|
||||
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
vma->bound |= GLOBAL_BIND;
|
||||
__i915_vma_set_map_and_fenceable(vma);
|
||||
list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
|
||||
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
|
||||
if (ret) {
|
||||
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
vma->flags |= I915_VMA_GLOBAL_BIND;
|
||||
__i915_vma_set_map_and_fenceable(vma);
|
||||
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
|
||||
obj->bind_count++;
|
||||
|
||||
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
|
||||
i915_gem_object_pin_pages(obj);
|
||||
|
||||
return obj;
|
||||
|
||||
err:
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -68,6 +68,9 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
|
|||
if (tiling_mode == I915_TILING_NONE)
|
||||
return true;
|
||||
|
||||
if (tiling_mode > I915_TILING_LAST)
|
||||
return false;
|
||||
|
||||
if (IS_GEN2(dev) ||
|
||||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
|
||||
tile_width = 128;
|
||||
|
@ -117,15 +120,16 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
|
|||
static bool
|
||||
i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||
u32 size;
|
||||
|
||||
if (tiling_mode == I915_TILING_NONE)
|
||||
return true;
|
||||
|
||||
if (INTEL_INFO(obj->base.dev)->gen >= 4)
|
||||
if (INTEL_GEN(dev_priv) >= 4)
|
||||
return true;
|
||||
|
||||
if (IS_GEN3(obj->base.dev)) {
|
||||
if (IS_GEN3(dev_priv)) {
|
||||
if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
|
||||
return false;
|
||||
} else {
|
||||
|
@ -133,7 +137,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
|
|||
return false;
|
||||
}
|
||||
|
||||
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
|
||||
size = i915_gem_get_ggtt_size(dev_priv, obj->base.size, tiling_mode);
|
||||
if (i915_gem_obj_ggtt_size(obj) != size)
|
||||
return false;
|
||||
|
||||
|
@ -166,13 +170,16 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
|
|||
struct drm_i915_gem_object *obj;
|
||||
int ret = 0;
|
||||
|
||||
obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
|
||||
if (&obj->base == NULL)
|
||||
/* Make sure we don't cross-contaminate obj->tiling_and_stride */
|
||||
BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
|
||||
|
||||
obj = i915_gem_object_lookup(file, args->handle);
|
||||
if (!obj)
|
||||
return -ENOENT;
|
||||
|
||||
if (!i915_tiling_ok(dev,
|
||||
args->stride, obj->base.size, args->tiling_mode)) {
|
||||
drm_gem_object_unreference_unlocked(&obj->base);
|
||||
i915_gem_object_put_unlocked(obj);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -213,8 +220,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
|
|||
}
|
||||
}
|
||||
|
||||
if (args->tiling_mode != obj->tiling_mode ||
|
||||
args->stride != obj->stride) {
|
||||
if (args->tiling_mode != i915_gem_object_get_tiling(obj) ||
|
||||
args->stride != i915_gem_object_get_stride(obj)) {
|
||||
/* We need to rebind the object if its current allocation
|
||||
* no longer meets the alignment restrictions for its new
|
||||
* tiling mode. Otherwise we can just leave it alone, but
|
||||
|
@ -237,24 +244,25 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
|
|||
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
|
||||
if (args->tiling_mode == I915_TILING_NONE)
|
||||
i915_gem_object_unpin_pages(obj);
|
||||
if (obj->tiling_mode == I915_TILING_NONE)
|
||||
if (!i915_gem_object_is_tiled(obj))
|
||||
i915_gem_object_pin_pages(obj);
|
||||
}
|
||||
|
||||
obj->fence_dirty =
|
||||
obj->last_fenced_req ||
|
||||
!i915_gem_active_is_idle(&obj->last_fence,
|
||||
&dev->struct_mutex) ||
|
||||
obj->fence_reg != I915_FENCE_REG_NONE;
|
||||
|
||||
obj->tiling_mode = args->tiling_mode;
|
||||
obj->stride = args->stride;
|
||||
obj->tiling_and_stride =
|
||||
args->stride | args->tiling_mode;
|
||||
|
||||
/* Force the fence to be reacquired for GTT access */
|
||||
i915_gem_release_mmap(obj);
|
||||
}
|
||||
}
|
||||
/* we have to maintain this existing ABI... */
|
||||
args->stride = obj->stride;
|
||||
args->tiling_mode = obj->tiling_mode;
|
||||
args->stride = i915_gem_object_get_stride(obj);
|
||||
args->tiling_mode = i915_gem_object_get_tiling(obj);
|
||||
|
||||
/* Try to preallocate memory required to save swizzling on put-pages */
|
||||
if (i915_gem_object_needs_bit17_swizzle(obj)) {
|
||||
|
@ -268,7 +276,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
|
|||
}
|
||||
|
||||
err:
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
|
@ -297,14 +305,12 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
|
|||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
|
||||
if (&obj->base == NULL)
|
||||
obj = i915_gem_object_lookup(file, args->handle);
|
||||
if (!obj)
|
||||
return -ENOENT;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
args->tiling_mode = obj->tiling_mode;
|
||||
switch (obj->tiling_mode) {
|
||||
args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
|
||||
switch (args->tiling_mode) {
|
||||
case I915_TILING_X:
|
||||
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
|
||||
break;
|
||||
|
@ -328,8 +334,6 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
|
|||
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
|
||||
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
|
||||
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
i915_gem_object_put_unlocked(obj);
|
||||
return 0;
|
||||
}
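/*
 * Stand-alone sketch, not part of this patch: packing a small mode enum
 * and a larger, alignment-guaranteed stride into one integer, as
 * tiling_and_stride does above. The mask values here are assumptions made
 * for the example, not the driver's definitions.
 */
#include <assert.h>
#include <stdint.h>

#define EX_TILING_MASK	0x3u			/* low bits hold the tiling mode */
#define EX_STRIDE_MASK	(~EX_TILING_MASK)	/* stride never uses the low bits */

int main(void)
{
	uint32_t stride = 4096;	/* never overlaps EX_TILING_MASK */
	uint32_t tiling = 2;	/* e.g. a "Y tiled" mode */
	uint32_t packed = stride | tiling;

	assert((stride & EX_TILING_MASK) == 0);		/* no cross-contamination */
	assert((packed & EX_TILING_MASK) == tiling);
	assert((packed & EX_STRIDE_MASK) == stride);
	return 0;
}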
|
||||
|
|
|
@ -63,33 +63,12 @@ struct i915_mmu_object {
|
|||
|
||||
static void wait_rendering(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
|
||||
int i, n;
|
||||
unsigned long active = __I915_BO_ACTIVE(obj);
|
||||
int idx;
|
||||
|
||||
if (!obj->active)
|
||||
return;
|
||||
|
||||
n = 0;
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
struct drm_i915_gem_request *req;
|
||||
|
||||
req = obj->last_read_req[i];
|
||||
if (req == NULL)
|
||||
continue;
|
||||
|
||||
requests[n++] = i915_gem_request_reference(req);
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
for (i = 0; i < n; i++)
|
||||
__i915_wait_request(requests[i], false, NULL, NULL);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
for (i = 0; i < n; i++)
|
||||
i915_gem_request_unreference(requests[i]);
|
||||
for_each_active(active, idx)
|
||||
i915_gem_active_wait_unlocked(&obj->last_read[idx],
|
||||
false, NULL, NULL);
|
||||
}
|
||||
|
||||
static void cancel_userptr(struct work_struct *work)
|
||||
|
@ -98,28 +77,19 @@ static void cancel_userptr(struct work_struct *work)
|
|||
struct drm_i915_gem_object *obj = mo->obj;
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
|
||||
wait_rendering(obj);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
/* Cancel any active worker and force us to re-evaluate gup */
|
||||
obj->userptr.work = NULL;
|
||||
|
||||
if (obj->pages != NULL) {
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct i915_vma *vma, *tmp;
|
||||
bool was_interruptible;
|
||||
|
||||
wait_rendering(obj);
|
||||
|
||||
was_interruptible = dev_priv->mm.interruptible;
|
||||
dev_priv->mm.interruptible = false;
|
||||
|
||||
list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
|
||||
WARN_ON(i915_vma_unbind(vma));
|
||||
/* We are inside a kthread context and can't be interrupted */
|
||||
WARN_ON(i915_gem_object_unbind(obj));
|
||||
WARN_ON(i915_gem_object_put_pages(obj));
|
||||
|
||||
dev_priv->mm.interruptible = was_interruptible;
|
||||
}
|
||||
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
|
||||
|
@ -577,7 +547,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
|
|||
}
|
||||
|
||||
obj->userptr.workers--;
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
release_pages(pvec, pinned, 0);
|
||||
|
@ -622,8 +592,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
|
|||
obj->userptr.work = &work->work;
|
||||
obj->userptr.workers++;
|
||||
|
||||
work->obj = obj;
|
||||
drm_gem_object_reference(&obj->base);
|
||||
work->obj = i915_gem_object_get(obj);
|
||||
|
||||
work->task = current;
|
||||
get_task_struct(work->task);
|
||||
|
@ -846,7 +815,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
|
|||
ret = drm_gem_handle_create(file, &obj->base, &handle);
|
||||
|
||||
/* drop reference from allocate - handle holds it now */
|
||||
drm_gem_object_unreference_unlocked(&obj->base);
|
||||
i915_gem_object_put_unlocked(obj);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
|
@ -30,9 +30,9 @@
|
|||
#include <generated/utsrelease.h>
|
||||
#include "i915_drv.h"
|
||||
|
||||
static const char *ring_str(int ring)
|
||||
static const char *engine_str(int engine)
|
||||
{
|
||||
switch (ring) {
|
||||
switch (engine) {
|
||||
case RCS: return "render";
|
||||
case VCS: return "bsd";
|
||||
case BCS: return "blt";
|
||||
|
@ -207,8 +207,8 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
|
|||
err_puts(m, dirty_flag(err->dirty));
|
||||
err_puts(m, purgeable_flag(err->purgeable));
|
||||
err_puts(m, err->userptr ? " userptr" : "");
|
||||
err_puts(m, err->ring != -1 ? " " : "");
|
||||
err_puts(m, ring_str(err->ring));
|
||||
err_puts(m, err->engine != -1 ? " " : "");
|
||||
err_puts(m, engine_str(err->engine));
|
||||
err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
|
||||
|
||||
if (err->name)
|
||||
|
@ -221,7 +221,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
|
|||
}
|
||||
}
|
||||
|
||||
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
|
||||
static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
|
||||
{
|
||||
switch (a) {
|
||||
case HANGCHECK_IDLE:
|
||||
|
@ -239,70 +239,65 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
|
|||
return "unknown";
|
||||
}
|
||||
|
||||
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
|
||||
struct drm_device *dev,
|
||||
struct drm_i915_error_state *error,
|
||||
int ring_idx)
|
||||
static void error_print_engine(struct drm_i915_error_state_buf *m,
|
||||
struct drm_i915_error_engine *ee)
|
||||
{
|
||||
struct drm_i915_error_ring *ring = &error->ring[ring_idx];
|
||||
|
||||
if (!ring->valid)
|
||||
return;
|
||||
|
||||
err_printf(m, "%s command stream:\n", ring_str(ring_idx));
|
||||
err_printf(m, " START: 0x%08x\n", ring->start);
|
||||
err_printf(m, " HEAD: 0x%08x\n", ring->head);
|
||||
err_printf(m, " TAIL: 0x%08x\n", ring->tail);
|
||||
err_printf(m, " CTL: 0x%08x\n", ring->ctl);
|
||||
err_printf(m, " HWS: 0x%08x\n", ring->hws);
|
||||
err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
|
||||
err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
|
||||
err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
|
||||
err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
|
||||
if (INTEL_INFO(dev)->gen >= 4) {
|
||||
err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
|
||||
err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
|
||||
err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
|
||||
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
|
||||
err_printf(m, " START: 0x%08x\n", ee->start);
|
||||
err_printf(m, " HEAD: 0x%08x\n", ee->head);
|
||||
err_printf(m, " TAIL: 0x%08x\n", ee->tail);
|
||||
err_printf(m, " CTL: 0x%08x\n", ee->ctl);
|
||||
err_printf(m, " HWS: 0x%08x\n", ee->hws);
|
||||
err_printf(m, " ACTHD: 0x%08x %08x\n",
|
||||
(u32)(ee->acthd>>32), (u32)ee->acthd);
|
||||
err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
|
||||
err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
|
||||
err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone);
|
||||
if (INTEL_GEN(m->i915) >= 4) {
|
||||
err_printf(m, " BBADDR: 0x%08x %08x\n",
|
||||
(u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
|
||||
err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
|
||||
err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
|
||||
}
|
||||
err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
|
||||
err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
|
||||
lower_32_bits(ring->faddr));
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
|
||||
err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
|
||||
err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
|
||||
err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
|
||||
lower_32_bits(ee->faddr));
|
||||
if (INTEL_GEN(m->i915) >= 6) {
|
||||
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
|
||||
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
|
||||
err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
|
||||
ring->semaphore_mboxes[0],
|
||||
ring->semaphore_seqno[0]);
|
||||
ee->semaphore_mboxes[0],
|
||||
ee->semaphore_seqno[0]);
|
||||
err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
|
||||
ring->semaphore_mboxes[1],
|
||||
ring->semaphore_seqno[1]);
|
||||
if (HAS_VEBOX(dev)) {
|
||||
ee->semaphore_mboxes[1],
|
||||
ee->semaphore_seqno[1]);
|
||||
if (HAS_VEBOX(m->i915)) {
|
||||
err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
|
||||
ring->semaphore_mboxes[2],
|
||||
ring->semaphore_seqno[2]);
|
||||
ee->semaphore_mboxes[2],
|
||||
ee->semaphore_seqno[2]);
|
||||
}
|
||||
}
|
||||
if (USES_PPGTT(dev)) {
|
||||
err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
|
||||
if (USES_PPGTT(m->i915)) {
|
||||
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 8) {
|
||||
if (INTEL_GEN(m->i915) >= 8) {
|
||||
int i;
|
||||
for (i = 0; i < 4; i++)
|
||||
err_printf(m, " PDP%d: 0x%016llx\n",
|
||||
i, ring->vm_info.pdp[i]);
|
||||
i, ee->vm_info.pdp[i]);
|
||||
} else {
|
||||
err_printf(m, " PP_DIR_BASE: 0x%08x\n",
|
||||
ring->vm_info.pp_dir_base);
|
||||
ee->vm_info.pp_dir_base);
|
||||
}
|
||||
}
|
||||
err_printf(m, " seqno: 0x%08x\n", ring->seqno);
|
||||
err_printf(m, " last_seqno: 0x%08x\n", ring->last_seqno);
|
||||
err_printf(m, " waiting: %s\n", yesno(ring->waiting));
|
||||
err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
|
||||
err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
|
||||
err_printf(m, " seqno: 0x%08x\n", ee->seqno);
|
||||
err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
|
||||
err_printf(m, " waiting: %s\n", yesno(ee->waiting));
|
||||
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
|
||||
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
|
||||
err_printf(m, " hangcheck: %s [%d]\n",
|
||||
hangcheck_action_to_str(ring->hangcheck_action),
|
||||
ring->hangcheck_score);
|
||||
hangcheck_action_to_str(ee->hangcheck_action),
|
||||
ee->hangcheck_score);
|
||||
}
|
||||
|
||||
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
|
||||
|
@ -348,17 +343,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
error->time.tv_usec);
|
||||
err_printf(m, "Kernel: " UTS_RELEASE "\n");
|
||||
max_hangcheck_score = 0;
|
||||
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
|
||||
if (error->ring[i].hangcheck_score > max_hangcheck_score)
|
||||
max_hangcheck_score = error->ring[i].hangcheck_score;
|
||||
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
|
||||
if (error->engine[i].hangcheck_score > max_hangcheck_score)
|
||||
max_hangcheck_score = error->engine[i].hangcheck_score;
|
||||
}
|
||||
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
|
||||
if (error->ring[i].hangcheck_score == max_hangcheck_score &&
|
||||
error->ring[i].pid != -1) {
|
||||
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
|
||||
if (error->engine[i].hangcheck_score == max_hangcheck_score &&
|
||||
error->engine[i].pid != -1) {
|
||||
err_printf(m, "Active process (on ring %s): %s [%d]\n",
|
||||
ring_str(i),
|
||||
error->ring[i].comm,
|
||||
error->ring[i].pid);
|
||||
engine_str(i),
|
||||
error->engine[i].comm,
|
||||
error->engine[i].pid);
|
||||
}
|
||||
}
|
||||
err_printf(m, "Reset count: %u\n", error->reset_count);
|
||||
|
@ -414,8 +409,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
if (IS_GEN7(dev))
|
||||
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(error->ring); i++)
|
||||
i915_ring_error_state(m, dev, error, i);
|
||||
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
|
||||
if (error->engine[i].engine_id != -1)
|
||||
error_print_engine(m, &error->engine[i]);
|
||||
}
|
||||
|
||||
for (i = 0; i < error->vm_count; i++) {
|
||||
err_printf(m, "vm[%d]\n", i);
|
||||
|
@ -429,21 +426,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
error->pinned_bo_count[i]);
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
|
||||
obj = error->ring[i].batchbuffer;
|
||||
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
|
||||
struct drm_i915_error_engine *ee = &error->engine[i];
|
||||
|
||||
obj = ee->batchbuffer;
|
||||
if (obj) {
|
||||
err_puts(m, dev_priv->engine[i].name);
|
||||
if (error->ring[i].pid != -1)
|
||||
if (ee->pid != -1)
|
||||
err_printf(m, " (submitted by %s [%d])",
|
||||
error->ring[i].comm,
|
||||
error->ring[i].pid);
|
||||
ee->comm,
|
||||
ee->pid);
|
||||
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
|
||||
upper_32_bits(obj->gtt_offset),
|
||||
lower_32_bits(obj->gtt_offset));
|
||||
print_error_obj(m, obj);
|
||||
}
|
||||
|
||||
obj = error->ring[i].wa_batchbuffer;
|
||||
obj = ee->wa_batchbuffer;
|
||||
if (obj) {
|
||||
err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
|
||||
dev_priv->engine[i].name,
|
||||
|
@ -451,38 +450,38 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
print_error_obj(m, obj);
|
||||
}
|
||||
|
||||
if (error->ring[i].num_requests) {
|
||||
if (ee->num_requests) {
|
||||
err_printf(m, "%s --- %d requests\n",
|
||||
dev_priv->engine[i].name,
|
||||
error->ring[i].num_requests);
|
||||
for (j = 0; j < error->ring[i].num_requests; j++) {
|
||||
ee->num_requests);
|
||||
for (j = 0; j < ee->num_requests; j++) {
|
||||
err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
|
||||
error->ring[i].requests[j].seqno,
|
||||
error->ring[i].requests[j].jiffies,
|
||||
error->ring[i].requests[j].tail);
|
||||
ee->requests[j].seqno,
|
||||
ee->requests[j].jiffies,
|
||||
ee->requests[j].tail);
|
||||
}
|
||||
}
|
||||
|
||||
if (error->ring[i].num_waiters) {
|
||||
if (ee->num_waiters) {
|
||||
err_printf(m, "%s --- %d waiters\n",
|
||||
dev_priv->engine[i].name,
|
||||
error->ring[i].num_waiters);
|
||||
for (j = 0; j < error->ring[i].num_waiters; j++) {
|
||||
ee->num_waiters);
|
||||
for (j = 0; j < ee->num_waiters; j++) {
|
||||
err_printf(m, " seqno 0x%08x for %s [%d]\n",
|
||||
error->ring[i].waiters[j].seqno,
|
||||
error->ring[i].waiters[j].comm,
|
||||
error->ring[i].waiters[j].pid);
|
||||
ee->waiters[j].seqno,
|
||||
ee->waiters[j].comm,
|
||||
ee->waiters[j].pid);
|
||||
}
|
||||
}
|
||||
|
||||
if ((obj = error->ring[i].ringbuffer)) {
|
||||
if ((obj = ee->ringbuffer)) {
|
||||
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
|
||||
dev_priv->engine[i].name,
|
||||
lower_32_bits(obj->gtt_offset));
|
||||
print_error_obj(m, obj);
|
||||
}
|
||||
|
||||
if ((obj = error->ring[i].hws_page)) {
|
||||
if ((obj = ee->hws_page)) {
|
||||
u64 hws_offset = obj->gtt_offset;
|
||||
u32 *hws_page = &obj->pages[0][0];
|
||||
|
||||
|
@ -504,7 +503,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
}
|
||||
}
|
||||
|
||||
obj = error->ring[i].wa_ctx;
|
||||
obj = ee->wa_ctx;
|
||||
if (obj) {
|
||||
u64 wa_ctx_offset = obj->gtt_offset;
|
||||
u32 *wa_ctx_page = &obj->pages[0][0];
|
||||
|
@ -526,7 +525,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
|||
}
|
||||
}
|
||||
|
||||
if ((obj = error->ring[i].ctx)) {
|
||||
if ((obj = ee->ctx)) {
|
||||
err_printf(m, "%s --- HW Context = 0x%08x\n",
|
||||
dev_priv->engine[i].name,
|
||||
lower_32_bits(obj->gtt_offset));
|
||||
|
@ -611,15 +610,18 @@ static void i915_error_state_free(struct kref *error_ref)
|
|||
typeof(*error), ref);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
|
||||
i915_error_object_free(error->ring[i].batchbuffer);
|
||||
i915_error_object_free(error->ring[i].wa_batchbuffer);
|
||||
i915_error_object_free(error->ring[i].ringbuffer);
|
||||
i915_error_object_free(error->ring[i].hws_page);
|
||||
i915_error_object_free(error->ring[i].ctx);
|
||||
i915_error_object_free(error->ring[i].wa_ctx);
|
||||
kfree(error->ring[i].requests);
|
||||
kfree(error->ring[i].waiters);
|
||||
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
|
||||
struct drm_i915_error_engine *ee = &error->engine[i];
|
||||
|
||||
i915_error_object_free(ee->batchbuffer);
|
||||
i915_error_object_free(ee->wa_batchbuffer);
|
||||
i915_error_object_free(ee->ringbuffer);
|
||||
i915_error_object_free(ee->hws_page);
|
||||
i915_error_object_free(ee->ctx);
|
||||
i915_error_object_free(ee->wa_ctx);
|
||||
|
||||
kfree(ee->requests);
|
||||
kfree(ee->waiters);
|
||||
}
|
||||
|
||||
i915_error_object_free(error->semaphore_obj);
|
||||
|
@ -667,14 +669,14 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
|
|||
if (i915_is_ggtt(vm))
|
||||
vma = i915_gem_obj_to_ggtt(src);
|
||||
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
|
||||
vma && (vma->bound & GLOBAL_BIND) &&
|
||||
vma && (vma->flags & I915_VMA_GLOBAL_BIND) &&
|
||||
reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
|
||||
|
||||
/* Cannot access stolen address directly, try to use the aperture */
|
||||
if (src->stolen) {
|
||||
use_ggtt = true;
|
||||
|
||||
if (!(vma && vma->bound & GLOBAL_BIND))
|
||||
if (!(vma && vma->flags & I915_VMA_GLOBAL_BIND))
|
||||
goto unwind;
|
||||
|
||||
reloc_offset = i915_gem_obj_ggtt_offset(src);
|
||||
|
@ -740,6 +742,24 @@ unwind:
|
|||
#define i915_error_ggtt_object_create(dev_priv, src) \
|
||||
i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
|
||||
|
||||
/* The error capture is special as tries to run underneath the normal
|
||||
* locking rules - so we use the raw version of the i915_gem_active lookup.
|
||||
*/
|
||||
static inline uint32_t
|
||||
__active_get_seqno(struct i915_gem_active *active)
|
||||
{
|
||||
return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
|
||||
}
|
||||
|
||||
static inline int
|
||||
__active_get_engine_id(struct i915_gem_active *active)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
|
||||
return engine ? engine->id : -1;
|
||||
}
|
||||
|
||||
static void capture_bo(struct drm_i915_error_buffer *err,
|
||||
struct i915_vma *vma)
|
||||
{
|
||||
|
@ -748,9 +768,12 @@ static void capture_bo(struct drm_i915_error_buffer *err,
|
|||
|
||||
err->size = obj->base.size;
|
||||
err->name = obj->base.name;
|
||||
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++)
|
||||
err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
|
||||
err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
|
||||
err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
|
||||
err->wseqno = __active_get_seqno(&obj->last_write);
|
||||
err->engine = __active_get_engine_id(&obj->last_write);
|
||||
|
||||
err->gtt_offset = vma->node.start;
|
||||
err->read_domains = obj->base.read_domains;
|
||||
err->write_domain = obj->base.write_domain;
|
||||
|
@ -758,12 +781,10 @@ static void capture_bo(struct drm_i915_error_buffer *err,
|
|||
err->pinned = 0;
|
||||
if (i915_gem_obj_is_pinned(obj))
|
||||
err->pinned = 1;
|
||||
err->tiling = obj->tiling_mode;
|
||||
err->tiling = i915_gem_object_get_tiling(obj);
|
||||
err->dirty = obj->dirty;
|
||||
err->purgeable = obj->madv != I915_MADV_WILLNEED;
|
||||
err->userptr = obj->userptr.mm != NULL;
|
||||
err->ring = obj->last_write_req ?
|
||||
i915_gem_request_get_engine(obj->last_write_req)->id : -1;
|
||||
err->cache_level = obj->cache_level;
|
||||
}
|
||||
|
||||
|
@ -797,7 +818,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
|
|||
break;
|
||||
|
||||
list_for_each_entry(vma, &obj->vma_list, obj_link)
|
||||
if (vma->vm == vm && vma->pin_count > 0)
|
||||
if (vma->vm == vm && i915_vma_is_pinned(vma))
|
||||
capture_bo(err++, vma);
|
||||
}
|
||||
|
||||
|
@ -815,7 +836,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
|
|||
*/
|
||||
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
|
||||
struct drm_i915_error_state *error,
|
||||
int *ring_id)
|
||||
int *engine_id)
|
||||
{
|
||||
uint32_t error_code = 0;
|
||||
int i;
|
||||
|
@ -826,11 +847,11 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
|
|||
* strictly a client bug. Use instdone to differentiate those some.
|
||||
*/
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
|
||||
if (ring_id)
|
||||
*ring_id = i;
|
||||
if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
|
||||
if (engine_id)
|
||||
*engine_id = i;
|
||||
|
||||
return error->ring[i].ipehr ^ error->ring[i].instdone;
|
||||
return error->engine[i].ipehr ^ error->engine[i].instdone;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -855,21 +876,16 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
|
|||
}
|
||||
|
||||
|
||||
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
|
||||
struct drm_i915_error_state *error,
|
||||
static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
|
||||
struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_ring *ering)
|
||||
struct drm_i915_error_engine *ee)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
struct intel_engine_cs *to;
|
||||
enum intel_engine_id id;
|
||||
|
||||
if (!i915_semaphore_is_enabled(dev_priv))
|
||||
return;
|
||||
|
||||
if (!error->semaphore_obj)
|
||||
error->semaphore_obj =
|
||||
i915_error_ggtt_object_create(dev_priv,
|
||||
dev_priv->semaphore_obj);
|
||||
return;
|
||||
|
||||
for_each_engine_id(to, dev_priv, id) {
|
||||
int idx;
|
||||
|
@ -879,42 +895,43 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
|
|||
if (engine == to)
|
||||
continue;
|
||||
|
||||
signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
|
||||
/ 4;
|
||||
signal_offset =
|
||||
(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
|
||||
tmp = error->semaphore_obj->pages[0];
|
||||
idx = intel_ring_sync_index(engine, to);
|
||||
idx = intel_engine_sync_index(engine, to);
|
||||
|
||||
ering->semaphore_mboxes[idx] = tmp[signal_offset];
|
||||
ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
|
||||
ee->semaphore_mboxes[idx] = tmp[signal_offset];
|
||||
ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
|
||||
}
|
||||
}
|
||||
|
||||
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
|
||||
struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_ring *ering)
|
||||
static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_engine *ee)
|
||||
{
|
||||
ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
|
||||
ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
|
||||
ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
|
||||
ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
|
||||
ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
|
||||
ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
|
||||
ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
|
||||
ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
|
||||
|
||||
if (HAS_VEBOX(dev_priv)) {
|
||||
ering->semaphore_mboxes[2] =
|
||||
ee->semaphore_mboxes[2] =
|
||||
I915_READ(RING_SYNC_2(engine->mmio_base));
|
||||
ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
|
||||
ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
|
||||
}
|
||||
}
|
||||
|
||||
static void engine_record_waiters(struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_ring *ering)
|
||||
static void error_record_engine_waiters(struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_engine *ee)
|
||||
{
|
||||
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
||||
struct drm_i915_error_waiter *waiter;
|
||||
struct rb_node *rb;
|
||||
int count;
|
||||
|
||||
ering->num_waiters = 0;
|
||||
ering->waiters = NULL;
|
||||
ee->num_waiters = 0;
|
||||
ee->waiters = NULL;
|
||||
|
||||
spin_lock(&b->lock);
|
||||
count = 0;
|
||||
|
@ -930,7 +947,7 @@ static void engine_record_waiters(struct intel_engine_cs *engine,
|
|||
if (!waiter)
|
||||
return;
|
||||
|
||||
ering->waiters = waiter;
|
||||
ee->waiters = waiter;
|
||||
|
||||
spin_lock(&b->lock);
|
||||
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
|
||||
|
@ -941,55 +958,55 @@ static void engine_record_waiters(struct intel_engine_cs *engine,
|
|||
waiter->seqno = w->seqno;
|
||||
waiter++;
|
||||
|
||||
if (++ering->num_waiters == count)
|
||||
if (++ee->num_waiters == count)
|
||||
break;
|
||||
}
|
||||
spin_unlock(&b->lock);
|
||||
}
|
||||
|
||||
static void i915_record_ring_state(struct drm_i915_private *dev_priv,
|
||||
struct drm_i915_error_state *error,
|
||||
struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_ring *ering)
|
||||
static void error_record_engine_registers(struct drm_i915_error_state *error,
|
||||
struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_engine *ee)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 6) {
|
||||
ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
|
||||
ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
|
||||
ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
|
||||
ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
|
||||
if (INTEL_GEN(dev_priv) >= 8)
|
||||
gen8_record_semaphore_state(dev_priv, error, engine,
|
||||
ering);
|
||||
gen8_record_semaphore_state(error, engine, ee);
|
||||
else
|
||||
gen6_record_semaphore_state(dev_priv, engine, ering);
|
||||
gen6_record_semaphore_state(engine, ee);
|
||||
}
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 4) {
|
||||
ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
|
||||
ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
|
||||
ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
|
||||
ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
|
||||
ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
|
||||
ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
|
||||
ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
|
||||
ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
|
||||
ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
|
||||
ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
|
||||
ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
|
||||
ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
|
||||
if (INTEL_GEN(dev_priv) >= 8) {
|
||||
ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
|
||||
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
|
||||
ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
|
||||
ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
|
||||
}
|
||||
ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
|
||||
ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
|
||||
} else {
|
||||
ering->faddr = I915_READ(DMA_FADD_I8XX);
|
||||
ering->ipeir = I915_READ(IPEIR);
|
||||
ering->ipehr = I915_READ(IPEHR);
|
||||
ering->instdone = I915_READ(GEN2_INSTDONE);
|
||||
ee->faddr = I915_READ(DMA_FADD_I8XX);
|
||||
ee->ipeir = I915_READ(IPEIR);
|
||||
ee->ipehr = I915_READ(IPEHR);
|
||||
ee->instdone = I915_READ(GEN2_INSTDONE);
|
||||
}
|
||||
|
||||
ering->waiting = intel_engine_has_waiter(engine);
|
||||
ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
|
||||
ering->acthd = intel_ring_get_active_head(engine);
|
||||
ering->seqno = intel_engine_get_seqno(engine);
|
||||
ering->last_seqno = engine->last_submitted_seqno;
|
||||
ering->start = I915_READ_START(engine);
|
||||
ering->head = I915_READ_HEAD(engine);
|
||||
ering->tail = I915_READ_TAIL(engine);
|
||||
ering->ctl = I915_READ_CTL(engine);
|
||||
ee->waiting = intel_engine_has_waiter(engine);
|
||||
ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
|
||||
ee->acthd = intel_engine_get_active_head(engine);
|
||||
ee->seqno = intel_engine_get_seqno(engine);
|
||||
ee->last_seqno = engine->last_submitted_seqno;
|
||||
ee->start = I915_READ_START(engine);
|
||||
ee->head = I915_READ_HEAD(engine);
|
||||
ee->tail = I915_READ_TAIL(engine);
|
||||
ee->ctl = I915_READ_CTL(engine);
|
||||
|
||||
if (I915_NEED_GFX_HWS(dev_priv)) {
|
||||
i915_reg_t mmio;
|
||||
|
@ -1017,29 +1034,29 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
|
|||
mmio = RING_HWS_PGA(engine->mmio_base);
|
||||
}
|
||||
|
||||
ering->hws = I915_READ(mmio);
|
||||
ee->hws = I915_READ(mmio);
|
||||
}
|
||||
|
||||
ering->hangcheck_score = engine->hangcheck.score;
|
||||
ering->hangcheck_action = engine->hangcheck.action;
|
||||
ee->hangcheck_score = engine->hangcheck.score;
|
||||
ee->hangcheck_action = engine->hangcheck.action;
|
||||
|
||||
if (USES_PPGTT(dev_priv)) {
|
||||
int i;
|
||||
|
||||
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
|
||||
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
|
||||
|
||||
if (IS_GEN6(dev_priv))
|
||||
ering->vm_info.pp_dir_base =
|
||||
ee->vm_info.pp_dir_base =
|
||||
I915_READ(RING_PP_DIR_BASE_READ(engine));
|
||||
else if (IS_GEN7(dev_priv))
|
||||
ering->vm_info.pp_dir_base =
|
||||
ee->vm_info.pp_dir_base =
|
||||
I915_READ(RING_PP_DIR_BASE(engine));
|
||||
else if (INTEL_GEN(dev_priv) >= 8)
|
||||
for (i = 0; i < 4; i++) {
|
||||
ering->vm_info.pdp[i] =
|
||||
ee->vm_info.pdp[i] =
|
||||
I915_READ(GEN8_RING_PDP_UDW(engine, i));
|
||||
ering->vm_info.pdp[i] <<= 32;
|
||||
ering->vm_info.pdp[i] |=
|
||||
ee->vm_info.pdp[i] <<= 32;
|
||||
ee->vm_info.pdp[i] |=
|
||||
I915_READ(GEN8_RING_PDP_LDW(engine, i));
|
||||
}
|
||||
}
|
||||
|
@ -1048,7 +1065,7 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
|
|||
|
||||
static void i915_gem_record_active_context(struct intel_engine_cs *engine,
|
||||
struct drm_i915_error_state *error,
|
||||
struct drm_i915_error_ring *ering)
|
||||
struct drm_i915_error_engine *ee)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
@ -1062,7 +1079,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
|
|||
continue;
|
||||
|
||||
if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
|
||||
ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
|
||||
ee->ctx = i915_error_ggtt_object_create(dev_priv, obj);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -1075,23 +1092,31 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
|
|||
struct drm_i915_gem_request *request;
|
||||
int i, count;
|
||||
|
||||
if (dev_priv->semaphore_obj) {
|
||||
error->semaphore_obj =
|
||||
i915_error_ggtt_object_create(dev_priv,
|
||||
dev_priv->semaphore_obj);
|
||||
}
|
||||
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[i];
|
||||
struct drm_i915_error_engine *ee = &error->engine[i];
|
||||
|
||||
error->ring[i].pid = -1;
|
||||
ee->pid = -1;
|
||||
ee->engine_id = -1;
|
||||
|
||||
if (!intel_engine_initialized(engine))
|
||||
continue;
|
||||
|
||||
error->ring[i].valid = true;
|
||||
ee->engine_id = i;
|
||||
|
||||
i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
|
||||
engine_record_waiters(engine, &error->ring[i]);
|
||||
error_record_engine_registers(error, engine, ee);
|
||||
error_record_engine_waiters(engine, ee);
|
||||
|
||||
request = i915_gem_find_active_request(engine);
|
||||
if (request) {
|
||||
struct i915_address_space *vm;
|
||||
struct intel_ringbuffer *rb;
|
||||
struct intel_ring *ring;
|
||||
|
||||
vm = request->ctx->ppgtt ?
|
||||
&request->ctx->ppgtt->base : &ggtt->base;
|
||||
|
@ -1100,15 +1125,15 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
|
|||
* as the simplest method to avoid being overwritten
|
||||
* by userspace.
|
||||
*/
|
||||
error->ring[i].batchbuffer =
|
||||
ee->batchbuffer =
|
||||
i915_error_object_create(dev_priv,
|
||||
request->batch_obj,
|
||||
vm);
|
||||
|
||||
if (HAS_BROKEN_CS_TLB(dev_priv))
|
||||
error->ring[i].wa_batchbuffer =
|
||||
ee->wa_batchbuffer =
|
||||
i915_error_ggtt_object_create(dev_priv,
|
||||
engine->scratch.obj);
|
||||
engine->scratch.obj);
|
||||
|
||||
if (request->pid) {
|
||||
struct task_struct *task;
|
||||
|
@ -1116,8 +1141,8 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
|
|||
rcu_read_lock();
|
||||
task = pid_task(request->pid, PIDTYPE_PID);
|
||||
if (task) {
|
||||
strcpy(error->ring[i].comm, task->comm);
|
||||
error->ring[i].pid = task->pid;
|
||||
strcpy(ee->comm, task->comm);
|
||||
ee->pid = task->pid;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
@ -1125,44 +1150,40 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
|
|||
error->simulated |=
|
||||
request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
|
||||
|
||||
rb = request->ringbuf;
|
||||
error->ring[i].cpu_ring_head = rb->head;
|
||||
error->ring[i].cpu_ring_tail = rb->tail;
|
||||
error->ring[i].ringbuffer =
|
||||
ring = request->ring;
|
||||
ee->cpu_ring_head = ring->head;
|
||||
ee->cpu_ring_tail = ring->tail;
|
||||
ee->ringbuffer =
|
||||
i915_error_ggtt_object_create(dev_priv,
|
||||
rb->obj);
|
||||
ring->obj);
|
||||
}
|
||||
|
||||
error->ring[i].hws_page =
|
||||
ee->hws_page =
|
||||
i915_error_ggtt_object_create(dev_priv,
|
||||
engine->status_page.obj);
|
||||
|
||||
if (engine->wa_ctx.obj) {
|
||||
error->ring[i].wa_ctx =
|
||||
i915_error_ggtt_object_create(dev_priv,
|
||||
engine->wa_ctx.obj);
|
||||
}
|
||||
ee->wa_ctx = i915_error_ggtt_object_create(dev_priv,
|
||||
engine->wa_ctx.obj);
|
||||
|
||||
i915_gem_record_active_context(engine, error, &error->ring[i]);
|
||||
i915_gem_record_active_context(engine, error, ee);
|
||||
|
||||
count = 0;
|
||||
list_for_each_entry(request, &engine->request_list, list)
|
||||
list_for_each_entry(request, &engine->request_list, link)
|
||||
count++;
|
||||
|
||||
error->ring[i].num_requests = count;
|
||||
error->ring[i].requests =
|
||||
kcalloc(count, sizeof(*error->ring[i].requests),
|
||||
GFP_ATOMIC);
|
||||
if (error->ring[i].requests == NULL) {
|
||||
error->ring[i].num_requests = 0;
|
||||
ee->num_requests = count;
|
||||
ee->requests =
|
||||
kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
|
||||
if (!ee->requests) {
|
||||
ee->num_requests = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
count = 0;
|
||||
list_for_each_entry(request, &engine->request_list, list) {
|
||||
list_for_each_entry(request, &engine->request_list, link) {
|
||||
struct drm_i915_error_request *erq;
|
||||
|
||||
if (count >= error->ring[i].num_requests) {
|
||||
if (count >= ee->num_requests) {
|
||||
/*
|
||||
* If the ring request list was changed in
|
||||
* between the point where the error request
|
||||
|
@ -1181,8 +1202,8 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
|
|||
break;
|
||||
}
|
||||
|
||||
erq = &error->ring[i].requests[count++];
|
||||
erq->seqno = request->seqno;
|
||||
erq = &ee->requests[count++];
|
||||
erq->seqno = request->fence.seqno;
|
||||
erq->jiffies = request->emitted_jiffies;
|
||||
erq->tail = request->postfix;
|
||||
}
|
||||
|
@ -1209,7 +1230,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
|
|||
|
||||
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
|
||||
list_for_each_entry(vma, &obj->vma_list, obj_link)
|
||||
if (vma->vm == vm && vma->pin_count > 0)
|
||||
if (vma->vm == vm && i915_vma_is_pinned(vma))
|
||||
i++;
|
||||
}
|
||||
error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
|
||||
|
@ -1352,20 +1373,20 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
|
|||
const char *error_msg)
|
||||
{
|
||||
u32 ecode;
|
||||
int ring_id = -1, len;
|
||||
int engine_id = -1, len;
|
||||
|
||||
ecode = i915_error_generate_code(dev_priv, error, &ring_id);
|
||||
ecode = i915_error_generate_code(dev_priv, error, &engine_id);
|
||||
|
||||
len = scnprintf(error->error_msg, sizeof(error->error_msg),
|
||||
"GPU HANG: ecode %d:%d:0x%08x",
|
||||
INTEL_GEN(dev_priv), ring_id, ecode);
|
||||
INTEL_GEN(dev_priv), engine_id, ecode);
|
||||
|
||||
if (ring_id != -1 && error->ring[ring_id].pid != -1)
|
||||
if (engine_id != -1 && error->engine[engine_id].pid != -1)
|
||||
len += scnprintf(error->error_msg + len,
|
||||
sizeof(error->error_msg) - len,
|
||||
", in %s [%d]",
|
||||
error->ring[ring_id].comm,
|
||||
error->ring[ring_id].pid);
|
||||
error->engine[engine_id].comm,
|
||||
error->engine[engine_id].pid);
|
||||
|
||||
scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
|
||||
", reason: %s, action: %s",
|
||||
|
|
|
@ -363,7 +363,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
|
|||
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
|
||||
(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
|
||||
|
||||
obj = ce->ringbuf->obj;
|
||||
obj = ce->ring->obj;
|
||||
gfx_addr = i915_gem_obj_ggtt_offset(obj);
|
||||
|
||||
lrc->ring_begin = gfx_addr;
|
||||
|
@ -506,7 +506,7 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
|
|||
rq->engine);
|
||||
|
||||
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
|
||||
wqi->fence_id = rq->seqno;
|
||||
wqi->fence_id = rq->fence.seqno;
|
||||
|
||||
kunmap_atomic(base);
|
||||
}
|
||||
|
@ -585,7 +585,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
|
|||
* The only error here arises if the doorbell hardware isn't functioning
|
||||
* as expected, which really shouln't happen.
|
||||
*/
|
||||
int i915_guc_submit(struct drm_i915_gem_request *rq)
|
||||
static void i915_guc_submit(struct drm_i915_gem_request *rq)
|
||||
{
|
||||
unsigned int engine_id = rq->engine->id;
|
||||
struct intel_guc *guc = &rq->i915->guc;
|
||||
|
@ -601,9 +601,7 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
|
|||
client->b_fail += 1;
|
||||
|
||||
guc->submissions[engine_id] += 1;
|
||||
guc->last_seqno[engine_id] = rq->seqno;
|
||||
|
||||
return b_ret;
|
||||
guc->last_seqno[engine_id] = rq->fence.seqno;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -633,13 +631,13 @@ gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
|
|||
return NULL;
|
||||
|
||||
if (i915_gem_object_get_pages(obj)) {
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
|
||||
PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
if (i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
|
||||
PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
|
||||
i915_gem_object_put(obj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -661,7 +659,7 @@ static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
|
|||
if (i915_gem_obj_is_pinned(obj))
|
||||
i915_gem_object_ggtt_unpin(obj);
|
||||
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -992,6 +990,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
struct intel_guc *guc = &dev_priv->guc;
|
||||
struct i915_guc_client *client;
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
/* client for execbuf submission */
|
||||
client = guc_client_alloc(dev_priv,
|
||||
|
@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
|
|||
host2guc_sample_forcewake(guc, client);
|
||||
guc_init_doorbell_hw(guc);
|
||||
|
||||
/* Take over from manual control of ELSP (execlists) */
|
||||
for_each_engine(engine, dev_priv)
|
||||
engine->submit_request = i915_guc_submit;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1013,6 +1016,12 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
struct intel_guc *guc = &dev_priv->guc;
|
||||
|
||||
if (!guc->execbuf_client)
|
||||
return;
|
||||
|
||||
/* Revert back to manual ELSP submission */
|
||||
intel_execlists_enable_submission(dev_priv);
|
||||
|
||||
guc_client_free(dev_priv, guc->execbuf_client);
|
||||
guc->execbuf_client = NULL;
|
||||
}
|
||||
|
|
|
@ -656,12 +656,6 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
|
|||
* of horizontal active on the first line of vertical active
|
||||
*/
|
||||
|
||||
static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
|
||||
{
|
||||
/* Gen2 doesn't have a hardware frame counter */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Called from drm generic code, passed a 'crtc', which
|
||||
* we use as a pipe index
|
||||
*/
|
||||
|
@ -1105,9 +1099,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
|
|||
new_delay = dev_priv->rps.cur_freq;
|
||||
min = dev_priv->rps.min_freq_softlimit;
|
||||
max = dev_priv->rps.max_freq_softlimit;
|
||||
|
||||
if (client_boost) {
|
||||
new_delay = dev_priv->rps.max_freq_softlimit;
|
||||
if (client_boost || any_waiters(dev_priv))
|
||||
max = dev_priv->rps.max_freq;
|
||||
if (client_boost && new_delay < dev_priv->rps.boost_freq) {
|
||||
new_delay = dev_priv->rps.boost_freq;
|
||||
adj = 0;
|
||||
} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
|
||||
if (adj > 0)
|
||||
|
@ -1122,7 +1117,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
|
|||
new_delay = dev_priv->rps.efficient_freq;
|
||||
adj = 0;
|
||||
}
|
||||
} else if (any_waiters(dev_priv)) {
|
||||
} else if (client_boost || any_waiters(dev_priv)) {
|
||||
adj = 0;
|
||||
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
|
||||
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
|
||||
|
@ -2803,13 +2798,6 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
|
|||
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
|
||||
}
|
||||
|
||||
static bool
|
||||
ring_idle(struct intel_engine_cs *engine, u32 seqno)
|
||||
{
|
||||
return i915_seqno_passed(seqno,
|
||||
READ_ONCE(engine->last_submitted_seqno));
|
||||
}
|
||||
|
||||
static bool
|
||||
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
|
||||
{
|
||||
|
@ -2859,6 +2847,7 @@ static struct intel_engine_cs *
|
|||
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
void __iomem *vaddr;
|
||||
u32 cmd, ipehr, head;
|
||||
u64 offset = 0;
|
||||
int i, backwards;
|
||||
|
@ -2897,6 +2886,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
|
|||
*/
|
||||
head = I915_READ_HEAD(engine) & HEAD_ADDR;
|
||||
backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
|
||||
vaddr = (void __iomem *)engine->buffer->vaddr;
|
||||
|
||||
for (i = backwards; i; --i) {
|
||||
/*
|
||||
|
@ -2907,7 +2897,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
|
|||
head &= engine->buffer->size - 1;
|
||||
|
||||
/* This here seems to blow up */
|
||||
cmd = ioread32(engine->buffer->virtual_start + head);
|
||||
cmd = ioread32(vaddr + head);
|
||||
if (cmd == ipehr)
|
||||
break;
|
||||
|
||||
|
@ -2917,11 +2907,11 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
|
|||
if (!i)
|
||||
return NULL;
|
||||
|
||||
*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
|
||||
*seqno = ioread32(vaddr + head + 4) + 1;
|
||||
if (INTEL_GEN(dev_priv) >= 8) {
|
||||
offset = ioread32(engine->buffer->virtual_start + head + 12);
|
||||
offset = ioread32(vaddr + head + 12);
|
||||
offset <<= 32;
|
||||
offset = ioread32(engine->buffer->virtual_start + head + 8);
|
||||
offset |= ioread32(vaddr + head + 8);
|
||||
}
|
||||
return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
|
||||
}
|
||||
|
@ -2990,7 +2980,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
|
|||
return stuck;
|
||||
}
|
||||
|
||||
static enum intel_ring_hangcheck_action
|
||||
static enum intel_engine_hangcheck_action
|
||||
head_stuck(struct intel_engine_cs *engine, u64 acthd)
|
||||
{
|
||||
if (acthd != engine->hangcheck.acthd) {
|
||||
|
@ -3008,11 +2998,11 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
|
|||
return HANGCHECK_HUNG;
|
||||
}
|
||||
|
||||
static enum intel_ring_hangcheck_action
|
||||
ring_stuck(struct intel_engine_cs *engine, u64 acthd)
|
||||
static enum intel_engine_hangcheck_action
|
||||
engine_stuck(struct intel_engine_cs *engine, u64 acthd)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = engine->i915;
|
||||
enum intel_ring_hangcheck_action ha;
|
||||
enum intel_engine_hangcheck_action ha;
|
||||
u32 tmp;
|
||||
|
||||
ha = head_stuck(engine, acthd);
|
||||
|
@ -3121,14 +3111,14 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
|
|||
if (engine->irq_seqno_barrier)
|
||||
engine->irq_seqno_barrier(engine);
|
||||
|
||||
acthd = intel_ring_get_active_head(engine);
|
||||
acthd = intel_engine_get_active_head(engine);
|
||||
seqno = intel_engine_get_seqno(engine);
|
||||
|
||||
/* Reset stuck interrupts between batch advances */
|
||||
user_interrupts = 0;
|
||||
|
||||
if (engine->hangcheck.seqno == seqno) {
|
||||
if (ring_idle(engine, seqno)) {
|
||||
if (!intel_engine_is_active(engine)) {
|
||||
engine->hangcheck.action = HANGCHECK_IDLE;
|
||||
if (busy) {
|
||||
/* Safeguard against driver failure */
|
||||
|
@ -3137,13 +3127,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
|
|||
}
|
||||
} else {
|
||||
/* We always increment the hangcheck score
|
||||
* if the ring is busy and still processing
|
||||
* if the engine is busy and still processing
|
||||
* the same request, so that no single request
|
||||
* can run indefinitely (such as a chain of
|
||||
* batches). The only time we do not increment
|
||||
* the hangcheck score on this ring, if this
|
||||
* ring is in a legitimate wait for another
|
||||
* ring. In that case the waiting ring is a
|
||||
* engine is in a legitimate wait for another
|
||||
* engine. In that case the waiting engine is a
|
||||
* victim and we want to be sure we catch the
|
||||
* right culprit. Then every time we do kick
|
||||
* the ring, add a small increment to the
|
||||
|
@ -3151,8 +3141,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
|
|||
* being repeatedly kicked and so responsible
|
||||
* for stalling the machine.
|
||||
*/
|
||||
engine->hangcheck.action = ring_stuck(engine,
|
||||
acthd);
|
||||
engine->hangcheck.action =
|
||||
engine_stuck(engine, acthd);
|
||||
|
||||
switch (engine->hangcheck.action) {
|
||||
case HANGCHECK_IDLE:
|
||||
|
@ -4542,8 +4532,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
|
|||
i915_hangcheck_elapsed);
|
||||
|
||||
if (IS_GEN2(dev_priv)) {
|
||||
/* Gen2 doesn't have a hardware frame counter */
|
||||
dev->max_vblank_count = 0;
|
||||
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
|
||||
dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
|
||||
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
|
||||
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
|
||||
dev->driver->get_vblank_counter = g4x_get_vblank_counter;
|
||||
|
|
|
@ -173,6 +173,7 @@ static const struct intel_device_info intel_pineview_info = {
|
|||
.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
|
||||
.need_gfx_hws = 1, .has_hotplug = 1,
|
||||
.has_overlay = 1,
|
||||
.ring_mask = RENDER_RING,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
CURSOR_OFFSETS,
|
||||
};
|
||||
|
|
|
@ -186,13 +186,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
|||
#define GEN9_GRDOM_GUC (1 << 5)
|
||||
#define GEN8_GRDOM_MEDIA2 (1 << 7)
|
||||
|
||||
#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228)
|
||||
#define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518)
|
||||
#define RING_PP_DIR_DCLV(ring) _MMIO((ring)->mmio_base+0x220)
|
||||
#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base+0x228)
|
||||
#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base+0x518)
|
||||
#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base+0x220)
|
||||
#define PP_DIR_DCLV_2G 0xffffffff
|
||||
|
||||
#define GEN8_RING_PDP_UDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8 + 4)
|
||||
#define GEN8_RING_PDP_LDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8)
|
||||
#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4)
|
||||
#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8)
|
||||
|
||||
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
|
||||
#define GEN8_RPCS_ENABLE (1 << 31)
|
||||
|
@ -1536,6 +1536,7 @@ enum skl_disp_power_wells {
|
|||
#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
|
||||
/* Balance leg disable bits */
|
||||
#define BALANCE_LEG_DISABLE_SHIFT 23
|
||||
#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
|
||||
|
||||
/*
|
||||
* Fence registers
|
||||
|
@ -1647,7 +1648,7 @@ enum skl_disp_power_wells {
|
|||
#define ARB_MODE_BWGTLB_DISABLE (1<<9)
|
||||
#define ARB_MODE_SWIZZLE_BDW (1<<1)
|
||||
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
|
||||
#define RING_FAULT_REG(ring) _MMIO(0x4094 + 0x100*(ring)->id)
|
||||
#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
|
||||
#define RING_FAULT_GTTSEL_MASK (1<<11)
|
||||
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
|
||||
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
|
||||
|
@ -1845,7 +1846,7 @@ enum skl_disp_power_wells {
|
|||
|
||||
#define GFX_MODE _MMIO(0x2520)
|
||||
#define GFX_MODE_GEN7 _MMIO(0x229c)
|
||||
#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c)
|
||||
#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c)
|
||||
#define GFX_RUN_LIST_ENABLE (1<<15)
|
||||
#define GFX_INTERRUPT_STEERING (1<<14)
|
||||
#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
|
||||
|
@ -6132,6 +6133,7 @@ enum {
|
|||
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
|
||||
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
|
||||
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
|
||||
# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
|
||||
# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
|
||||
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
|
||||
|
||||
|
@ -6958,6 +6960,9 @@ enum {
|
|||
#define ECOBUS _MMIO(0xa180)
|
||||
#define FORCEWAKE_MT_ENABLE (1<<5)
|
||||
#define VLV_SPAREG2H _MMIO(0xA194)
|
||||
#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0)
|
||||
#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
|
||||
#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
|
||||
|
||||
#define GTFIFODBG _MMIO(0x120000)
|
||||
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
|
||||
|
@ -7485,6 +7490,7 @@ enum {
|
|||
#define _DDI_BUF_TRANS_A 0x64E00
|
||||
#define _DDI_BUF_TRANS_B 0x64E60
|
||||
#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
|
||||
#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
|
||||
#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
|
||||
|
||||
/* Sideband Interface (SBI) is programmed indirectly, via
|
||||
|
|
|
@ -271,8 +271,6 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
|
|||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
|
@ -303,19 +301,46 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
|
|||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n",
|
||||
intel_gpu_freq(dev_priv,
|
||||
dev_priv->rps.cur_freq));
|
||||
}
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_i915_private *dev_priv = to_i915(minor->dev);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n",
|
||||
intel_gpu_freq(dev_priv,
|
||||
dev_priv->rps.boost_freq));
|
||||
}
|
||||
|
||||
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
u32 val;
|
||||
ssize_t ret;
|
||||
|
||||
ret = kstrtou32(buf, 0, &val);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Validate against (static) hardware limits */
|
||||
val = intel_freq_opcode(dev_priv, val);
|
||||
if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
|
||||
dev_priv->rps.boost_freq = val;
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
|
||||
|
@ -325,9 +350,9 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
|
|||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE,
|
||||
"%d\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n",
|
||||
intel_gpu_freq(dev_priv,
|
||||
dev_priv->rps.efficient_freq));
|
||||
}
|
||||
|
||||
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
|
@ -335,15 +360,10 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
|
|||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n",
|
||||
intel_gpu_freq(dev_priv,
|
||||
dev_priv->rps.max_freq_softlimit));
|
||||
}
|
||||
|
||||
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
|
||||
|
@ -360,8 +380,6 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
|
@ -403,15 +421,10 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
|
|||
struct drm_minor *minor = dev_to_drm_minor(kdev);
|
||||
struct drm_device *dev = minor->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n",
|
||||
intel_gpu_freq(dev_priv,
|
||||
dev_priv->rps.min_freq_softlimit));
|
||||
}
|
||||
|
||||
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
|
||||
|
@ -428,8 +441,6 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
|
||||
intel_runtime_pm_get(dev_priv);
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
|
@ -465,6 +476,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
|
|||
|
||||
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
|
||||
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
|
||||
static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
|
||||
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
|
||||
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
|
||||
|
||||
|
@ -498,6 +510,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
|
|||
static const struct attribute *gen6_attrs[] = {
|
||||
&dev_attr_gt_act_freq_mhz.attr,
|
||||
&dev_attr_gt_cur_freq_mhz.attr,
|
||||
&dev_attr_gt_boost_freq_mhz.attr,
|
||||
&dev_attr_gt_max_freq_mhz.attr,
|
||||
&dev_attr_gt_min_freq_mhz.attr,
|
||||
&dev_attr_gt_RP0_freq_mhz.attr,
|
||||
|
@ -509,6 +522,7 @@ static const struct attribute *gen6_attrs[] = {
|
|||
static const struct attribute *vlv_attrs[] = {
|
||||
&dev_attr_gt_act_freq_mhz.attr,
|
||||
&dev_attr_gt_cur_freq_mhz.attr,
|
||||
&dev_attr_gt_boost_freq_mhz.attr,
|
||||
&dev_attr_gt_max_freq_mhz.attr,
|
||||
&dev_attr_gt_min_freq_mhz.attr,
|
||||
&dev_attr_gt_RP0_freq_mhz.attr,
|
||||
|
|
|
@@ -394,25 +394,27 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 );

 TRACE_EVENT(i915_gem_evict,
-	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
-	    TP_ARGS(dev, size, align, flags),
+	    TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
+	    TP_ARGS(vm, size, align, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
+			     __field(struct i915_address_space *, vm)
			     __field(u32, size)
			     __field(u32, align)
-			     __field(unsigned, flags)
+			     __field(unsigned int, flags)
			    ),

	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
+			   __entry->dev = vm->dev->primary->index;
+			   __entry->vm = vm;
			   __entry->size = size;
			   __entry->align = align;
			   __entry->flags = flags;
			  ),

-	    TP_printk("dev=%d, size=%d, align=%d %s",
-		      __entry->dev, __entry->size, __entry->align,
+	    TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
+		      __entry->dev, __entry->vm, __entry->size, __entry->align,
		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );

@@ -449,10 +451,9 @@ TRACE_EVENT(i915_gem_evict_vm,
 );

 TRACE_EVENT(i915_gem_ring_sync_to,
-	    TP_PROTO(struct drm_i915_gem_request *to_req,
-		     struct intel_engine_cs *from,
-		     struct drm_i915_gem_request *req),
-	    TP_ARGS(to_req, from, req),
+	    TP_PROTO(struct drm_i915_gem_request *to,
+		     struct drm_i915_gem_request *from),
+	    TP_ARGS(to, from),

	    TP_STRUCT__entry(
			     __field(u32, dev)

@@ -463,9 +464,9 @@ TRACE_EVENT(i915_gem_ring_sync_to,

	    TP_fast_assign(
			   __entry->dev = from->i915->drm.primary->index;
-			   __entry->sync_from = from->id;
-			   __entry->sync_to = to_req->engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->sync_from = from->engine->id;
+			   __entry->sync_to = to->engine->id;
+			   __entry->seqno = from->fence.seqno;
			  ),

	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",

@@ -488,9 +489,9 @@ TRACE_EVENT(i915_gem_ring_dispatch,
	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->seqno;
+			   __entry->seqno = req->fence.seqno;
			   __entry->flags = flags;
-			   intel_engine_enable_signaling(req);
+			   fence_enable_sw_signaling(&req->fence);
			  ),

	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",

@@ -533,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->seqno;
+			   __entry->seqno = req->fence.seqno;
			  ),

	    TP_printk("dev=%u, ring=%u, seqno=%u",

@@ -595,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->seqno;
+			   __entry->seqno = req->fence.seqno;
			   __entry->blocking =
				     mutex_is_locked(&req->i915->drm.struct_mutex);
			  ),
|
|
@@ -97,6 +97,7 @@ static struct _balloon_info_ bl_info;

 /**
  * intel_vgt_deballoon - deballoon reserved graphics address trunks
+ * @dev_priv: i915 device private data
  *
  * This function is called to deallocate the ballooned-out graphic memory, when
  * driver is unloaded or when ballooning fails.

@@ -138,7 +139,7 @@ static int vgt_balloon_space(struct drm_mm *mm,

 /**
  * intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev: drm device
+ * @dev_priv: i915 device private data
  *
  * This function is called at the initialization stage, to balloon out the
  * graphic address space allocated to other vGPUs, by marking these spaces as
|
|
@@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
 		return;

+	i915_audio_component_get_power(dev);
+
 	/*
 	 * Enable/disable generating the codec wake signal, overriding the
 	 * internal logic to generate the codec wake to controller.

@@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 		I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
 		usleep_range(1000, 1500);
 	}
+
+	i915_audio_component_put_power(dev);
 }

 /* Get CDCLK in kHz */

@@ -648,6 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 	    !IS_HASWELL(dev_priv))
 		return 0;

+	i915_audio_component_get_power(dev);
 	mutex_lock(&dev_priv->av_mutex);
 	/* 1. get the pipe */
 	intel_encoder = dev_priv->dig_port_map[port];

@@ -698,6 +703,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,

 unlock:
 	mutex_unlock(&dev_priv->av_mutex);
+	i915_audio_component_put_power(dev);
 	return err;
 }
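The four audio hunks above all apply one pattern: take a display power reference before touching audio registers and drop it on every exit path. A schematic version of that bracketing is shown below; do_audio_register_work() is a placeholder, not a real i915 function.

	static void i915_audio_component_get_power(struct device *dev);
	static void i915_audio_component_put_power(struct device *dev);
	static int do_audio_register_work(struct device *dev);	/* placeholder for the MMIO work */

	static int audio_component_op(struct device *dev)
	{
		int err;

		i915_audio_component_get_power(dev);	/* keep the power well up while we poke registers */
		err = do_audio_register_work(dev);
		i915_audio_component_put_power(dev);	/* balanced on every return path */

		return err;
	}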
|
|
|
@ -51,6 +51,13 @@ static void irq_enable(struct intel_engine_cs *engine)
|
|||
*/
|
||||
engine->breadcrumbs.irq_posted = true;
|
||||
|
||||
/* Make sure the current hangcheck doesn't falsely accuse a just
|
||||
* started irq handler from missing an interrupt (because the
|
||||
* interrupt count still matches the stale value from when
|
||||
* the irq handler was disabled, many hangchecks ago).
|
||||
*/
|
||||
engine->breadcrumbs.irq_wakeups++;
|
||||
|
||||
spin_lock_irq(&engine->i915->irq_lock);
|
||||
engine->irq_enable(engine);
|
||||
spin_unlock_irq(&engine->i915->irq_lock);
|
||||
|
@ -436,6 +443,7 @@ static int intel_breadcrumbs_signaler(void *arg)
|
|||
*/
|
||||
intel_engine_remove_wait(engine,
|
||||
&request->signaling.wait);
|
||||
fence_signal(&request->fence);
|
||||
|
||||
/* Find the next oldest signal. Note that as we have
|
||||
* not been holding the lock, another client may
|
||||
|
@ -452,7 +460,7 @@ static int intel_breadcrumbs_signaler(void *arg)
|
|||
rb_erase(&request->signaling.node, &b->signals);
|
||||
spin_unlock(&b->lock);
|
||||
|
||||
i915_gem_request_unreference(request);
|
||||
i915_gem_request_put(request);
|
||||
} else {
|
||||
if (kthread_should_stop())
|
||||
break;
|
||||
|
@ -472,18 +480,14 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
|
|||
struct rb_node *parent, **p;
|
||||
bool first, wakeup;
|
||||
|
||||
if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
|
||||
return;
|
||||
|
||||
spin_lock(&b->lock);
|
||||
if (unlikely(request->signaling.wait.tsk)) {
|
||||
wakeup = false;
|
||||
goto unlock;
|
||||
}
|
||||
/* locked by fence_enable_sw_signaling() */
|
||||
assert_spin_locked(&request->lock);
|
||||
|
||||
request->signaling.wait.tsk = b->signaler;
|
||||
request->signaling.wait.seqno = request->seqno;
|
||||
i915_gem_request_reference(request);
|
||||
request->signaling.wait.seqno = request->fence.seqno;
|
||||
i915_gem_request_get(request);
|
||||
|
||||
spin_lock(&b->lock);
|
||||
|
||||
/* First add ourselves into the list of waiters, but register our
|
||||
* bottom-half as the signaller thread. As per usual, only the oldest
|
||||
|
@ -504,8 +508,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
|
|||
p = &b->signals.rb_node;
|
||||
while (*p) {
|
||||
parent = *p;
|
||||
if (i915_seqno_passed(request->seqno,
|
||||
to_signaler(parent)->seqno)) {
|
||||
if (i915_seqno_passed(request->fence.seqno,
|
||||
to_signaler(parent)->fence.seqno)) {
|
||||
p = &parent->rb_right;
|
||||
first = false;
|
||||
} else {
|
||||
|
@ -517,7 +521,6 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
|
|||
if (first)
|
||||
smp_store_mb(b->first_signal, request);
|
||||
|
||||
unlock:
|
||||
spin_unlock(&b->lock);
|
||||
|
||||
if (wakeup)
|
||||
|
|
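The enable-signaling hunks above keep pending signalers sorted by fence seqno in an rbtree, using a wrap-safe seqno comparison, so the signaler thread can always wake for the oldest request first. Below is a standalone sketch of that insertion under a simplified node type that stands in for the request's signaling state; it is an illustration of the ordering, not the driver's actual code.

	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct signal_node {
		struct rb_node node;
		u32 seqno;		/* stand-in for request->fence.seqno */
	};

	/* Wrap-safe "a is at or after b" test, in the spirit of i915_seqno_passed(). */
	static inline bool seqno_passed(u32 a, u32 b)
	{
		return (s32)(a - b) >= 0;
	}

	/* Insert @sig into @signals ordered by seqno; returns true if it became the
	 * oldest entry, i.e. the one the signaler thread should handle next. */
	static bool insert_signal(struct rb_root *signals, struct signal_node *sig)
	{
		struct rb_node **p = &signals->rb_node;
		struct rb_node *parent = NULL;
		bool first = true;

		while (*p) {
			struct signal_node *it = rb_entry(*p, struct signal_node, node);

			parent = *p;
			if (seqno_passed(sig->seqno, it->seqno)) {
				/* sig is newer than it: walk right, no longer the oldest */
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		}
		rb_link_node(&sig->node, parent, p);
		rb_insert_color(&sig->node, signals);

		return first;
	}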
|
@ -32,13 +32,6 @@
|
|||
* onwards to drive newly added DMC (Display microcontroller) in display
|
||||
* engine to save and restore the state of display engine when it enter into
|
||||
* low-power state and comes back to normal.
|
||||
*
|
||||
* Firmware loading status will be one of the below states: FW_UNINITIALIZED,
|
||||
* FW_LOADED, FW_FAILED.
|
||||
*
|
||||
* Once the firmware is written into the registers status will be moved from
|
||||
* FW_UNINITIALIZED to FW_LOADED and for any erroneous condition status will
|
||||
* be moved to FW_FAILED.
|
||||
*/
|
||||
|
||||
#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
|
||||
|
|
|
@@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 	{ 0x0000201B, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80007011, 0x000000CD, 0x1 },
 	{ 0x80009010, 0x000000C0, 0x1 },
 	{ 0x0000201B, 0x0000009D, 0x0 },
 	{ 0x80005012, 0x000000C0, 0x1 },

@@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
 	{ 0x00000018, 0x000000A2, 0x0 },
 	{ 0x00005012, 0x00000088, 0x0 },
-	{ 0x80007011, 0x000000CD, 0x0 },
+	{ 0x80007011, 0x000000CD, 0x3 },
 	{ 0x80009010, 0x000000C0, 0x3 },
 	{ 0x00000018, 0x0000009D, 0x0 },
 	{ 0x80005012, 0x000000C0, 0x3 },

@@ -301,45 +301,34 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
 	{ 154, 0x9A, 1, 128, true },	/* 9: 1200 0 */
 };

-static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
-				    u32 level, enum port port, int type);
-
-static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
-				 struct intel_digital_port **dig_port,
-				 enum port *port)
+enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder)
 {
-	struct drm_encoder *encoder = &intel_encoder->base;
-
-	switch (intel_encoder->type) {
+	switch (encoder->type) {
 	case INTEL_OUTPUT_DP_MST:
-		*dig_port = enc_to_mst(encoder)->primary;
-		*port = (*dig_port)->port;
-		break;
-	default:
-		WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
-		/* fallthrough and treat as unknown */
+		return enc_to_mst(&encoder->base)->primary->port;
 	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_EDP:
 	case INTEL_OUTPUT_HDMI:
 	case INTEL_OUTPUT_UNKNOWN:
-		*dig_port = enc_to_dig_port(encoder);
-		*port = (*dig_port)->port;
-		break;
+		return enc_to_dig_port(&encoder->base)->port;
 	case INTEL_OUTPUT_ANALOG:
-		*dig_port = NULL;
-		*port = PORT_E;
-		break;
+		return PORT_E;
+	default:
+		MISSING_CASE(encoder->type);
+		return PORT_A;
 	}
 }

-enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+static const struct ddi_buf_trans *
+bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	struct intel_digital_port *dig_port;
-	enum port port;
-
-	ddi_get_encoder_port(intel_encoder, &dig_port, &port);
-
-	return port;
+	if (dev_priv->vbt.edp.low_vswing) {
+		*n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		return bdw_ddi_translations_edp;
+	} else {
+		*n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+		return bdw_ddi_translations_dp;
+	}
 }

 static const struct ddi_buf_trans *
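The new bdw_get_buf_trans_edp() above selects a translation table and reports its length through an out-parameter, and the caller then programs one DDI_BUF_TRANS register pair per entry. A schematic of that select-then-program shape is sketched below; the table contents, sizes, and the register-write helper are placeholders, not the driver's real values or API.

	#include <stdint.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct buf_trans { uint32_t trans1; uint32_t trans2; };

	/* Dummy entries; the real tables carry per-level vswing/pre-emphasis values. */
	static const struct buf_trans edp_low_vswing_table[] = { { 0, 0 }, { 0, 0 }, { 0, 0 } };
	static const struct buf_trans dp_table[] = { { 0, 0 }, { 0, 0 } };

	static const struct buf_trans *
	get_edp_buf_trans(int low_vswing, int *n_entries)
	{
		if (low_vswing) {
			*n_entries = ARRAY_SIZE(edp_low_vswing_table);
			return edp_low_vswing_table;
		}
		*n_entries = ARRAY_SIZE(dp_table);
		return dp_table;
	}

	/* Placeholder for the MMIO write done per DDI_BUF_TRANS entry. */
	static void write_buf_trans_entry(int index, uint32_t lo, uint32_t hi);

	static void program_ddi_buf_trans(int low_vswing)
	{
		const struct buf_trans *table;
		int i, n;

		table = get_edp_buf_trans(low_vswing, &n);
		for (i = 0; i < n; i++)
			write_buf_trans_entry(i, table[i].trans1, table[i].trans2);
	}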
||||
|
@ -388,39 +377,58 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
|
|||
}
|
||||
}
|
||||
|
||||
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
|
||||
{
|
||||
int n_hdmi_entries;
|
||||
int hdmi_level;
|
||||
int hdmi_default_entry;
|
||||
|
||||
hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
|
||||
|
||||
if (IS_BROXTON(dev_priv))
|
||||
return hdmi_level;
|
||||
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
|
||||
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
|
||||
hdmi_default_entry = 8;
|
||||
} else if (IS_BROADWELL(dev_priv)) {
|
||||
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
|
||||
hdmi_default_entry = 7;
|
||||
} else if (IS_HASWELL(dev_priv)) {
|
||||
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
|
||||
hdmi_default_entry = 6;
|
||||
} else {
|
||||
WARN(1, "ddi translation table missing\n");
|
||||
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
|
||||
hdmi_default_entry = 7;
|
||||
}
|
||||
|
||||
/* Choose a good default if VBT is badly populated */
|
||||
if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
|
||||
hdmi_level >= n_hdmi_entries)
|
||||
hdmi_level = hdmi_default_entry;
|
||||
|
||||
return hdmi_level;
|
||||
}
|
||||
|
||||
/*
|
||||
* Starting with Haswell, DDI port buffers must be programmed with correct
|
||||
* values in advance. The buffer values are different for FDI and DP modes,
|
||||
* but the HDMI/DVI fields are shared among those. So we program the DDI
|
||||
* in either FDI or DP modes only, as HDMI connections will work with both
|
||||
* of those
|
||||
* values in advance. This function programs the correct values for
|
||||
* DP/eDP/FDI use cases.
|
||||
*/
|
||||
void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
|
||||
void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
u32 iboost_bit = 0;
|
||||
int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
|
||||
size;
|
||||
int hdmi_level;
|
||||
enum port port;
|
||||
int i, n_dp_entries, n_edp_entries, size;
|
||||
enum port port = intel_ddi_get_encoder_port(encoder);
|
||||
const struct ddi_buf_trans *ddi_translations_fdi;
|
||||
const struct ddi_buf_trans *ddi_translations_dp;
|
||||
const struct ddi_buf_trans *ddi_translations_edp;
|
||||
const struct ddi_buf_trans *ddi_translations_hdmi;
|
||||
const struct ddi_buf_trans *ddi_translations;
|
||||
|
||||
port = intel_ddi_get_encoder_port(encoder);
|
||||
hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
|
||||
|
||||
if (IS_BROXTON(dev_priv)) {
|
||||
if (encoder->type != INTEL_OUTPUT_HDMI)
|
||||
return;
|
||||
|
||||
/* Vswing programming for HDMI */
|
||||
bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
|
||||
INTEL_OUTPUT_HDMI);
|
||||
if (IS_BROXTON(dev_priv))
|
||||
return;
|
||||
}
|
||||
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
|
||||
ddi_translations_fdi = NULL;
|
||||
|
@ -428,13 +436,10 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
|
|||
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
|
||||
ddi_translations_edp =
|
||||
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
|
||||
ddi_translations_hdmi =
|
||||
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
|
||||
hdmi_default_entry = 8;
|
||||
|
||||
/* If we're boosting the current, set bit 31 of trans1 */
|
||||
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
|
||||
dev_priv->vbt.ddi_port_info[port].dp_boost_level)
|
||||
iboost_bit = 1<<31;
|
||||
if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
|
||||
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
|
||||
|
||||
if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
|
||||
port != PORT_A && port != PORT_E &&
|
||||
|
@ -443,38 +448,20 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
|
|||
} else if (IS_BROADWELL(dev_priv)) {
|
||||
ddi_translations_fdi = bdw_ddi_translations_fdi;
|
||||
ddi_translations_dp = bdw_ddi_translations_dp;
|
||||
|
||||
if (dev_priv->vbt.edp.low_vswing) {
|
||||
ddi_translations_edp = bdw_ddi_translations_edp;
|
||||
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
|
||||
} else {
|
||||
ddi_translations_edp = bdw_ddi_translations_dp;
|
||||
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
|
||||
}
|
||||
|
||||
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
|
||||
|
||||
ddi_translations_edp = bdw_get_buf_trans_edp(dev_priv, &n_edp_entries);
|
||||
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
|
||||
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
|
||||
hdmi_default_entry = 7;
|
||||
} else if (IS_HASWELL(dev_priv)) {
|
||||
ddi_translations_fdi = hsw_ddi_translations_fdi;
|
||||
ddi_translations_dp = hsw_ddi_translations_dp;
|
||||
ddi_translations_edp = hsw_ddi_translations_dp;
|
||||
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
|
||||
n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
|
||||
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
|
||||
hdmi_default_entry = 6;
|
||||
} else {
|
||||
WARN(1, "ddi translation table missing\n");
|
||||
ddi_translations_edp = bdw_ddi_translations_dp;
|
||||
ddi_translations_fdi = bdw_ddi_translations_fdi;
|
||||
ddi_translations_dp = bdw_ddi_translations_dp;
|
||||
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
|
||||
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
|
||||
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
|
||||
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
|
||||
hdmi_default_entry = 7;
|
||||
}
|
||||
|
||||
switch (encoder->type) {
|
||||
|
@ -483,7 +470,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
|
|||
size = n_edp_entries;
|
||||
break;
|
||||
case INTEL_OUTPUT_DP:
|
||||
case INTEL_OUTPUT_HDMI:
|
||||
ddi_translations = ddi_translations_dp;
|
||||
size = n_dp_entries;
|
||||
break;
|
||||
|
@ -501,19 +487,48 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
|
|||
I915_WRITE(DDI_BUF_TRANS_HI(port, i),
|
||||
ddi_translations[i].trans2);
|
||||
}
|
||||
}
|
||||
|
||||
if (encoder->type != INTEL_OUTPUT_HDMI)
|
||||
/*
|
||||
* Starting with Haswell, DDI port buffers must be programmed with correct
|
||||
* values in advance. This function programs the correct values for
|
||||
* HDMI/DVI use cases.
|
||||
*/
|
||||
static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
u32 iboost_bit = 0;
|
||||
int n_hdmi_entries, hdmi_level;
|
||||
enum port port = intel_ddi_get_encoder_port(encoder);
|
||||
const struct ddi_buf_trans *ddi_translations_hdmi;
|
||||
|
||||
if (IS_BROXTON(dev_priv))
|
||||
return;
|
||||
|
||||
/* Choose a good default if VBT is badly populated */
|
||||
if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
|
||||
hdmi_level >= n_hdmi_entries)
|
||||
hdmi_level = hdmi_default_entry;
|
||||
hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
|
||||
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
|
||||
ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
|
||||
|
||||
/* If we're boosting the current, set bit 31 of trans1 */
|
||||
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
|
||||
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
|
||||
} else if (IS_BROADWELL(dev_priv)) {
|
||||
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
|
||||
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
|
||||
} else if (IS_HASWELL(dev_priv)) {
|
||||
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
|
||||
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
|
||||
} else {
|
||||
WARN(1, "ddi translation table missing\n");
|
||||
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
|
||||
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
|
||||
}
|
||||
|
||||
/* Entry 9 is for HDMI: */
|
||||
I915_WRITE(DDI_BUF_TRANS_LO(port, i),
|
||||
I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
|
||||
ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
|
||||
I915_WRITE(DDI_BUF_TRANS_HI(port, i),
|
||||
I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
|
||||
ddi_translations_hdmi[hdmi_level].trans2);
|
||||
}
|
||||
|
||||
|
@ -550,7 +565,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
|
|||
|
||||
for_each_encoder_on_crtc(dev, crtc, encoder) {
|
||||
WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
|
||||
intel_prepare_ddi_buffer(encoder);
|
||||
intel_prepare_dp_ddi_buffers(encoder);
|
||||
}
|
||||
|
||||
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
|
||||
|
@ -1111,7 +1126,6 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
|
|||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
|
||||
struct drm_encoder *encoder = &intel_encoder->base;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
|
@ -1177,29 +1191,15 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
|
|||
temp |= TRANS_DDI_MODE_SELECT_HDMI;
|
||||
else
|
||||
temp |= TRANS_DDI_MODE_SELECT_DVI;
|
||||
|
||||
} else if (type == INTEL_OUTPUT_ANALOG) {
|
||||
temp |= TRANS_DDI_MODE_SELECT_FDI;
|
||||
temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
|
||||
|
||||
} else if (type == INTEL_OUTPUT_DP ||
|
||||
type == INTEL_OUTPUT_EDP) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
if (intel_dp->is_mst) {
|
||||
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
|
||||
} else
|
||||
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
|
||||
|
||||
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
|
||||
temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
|
||||
} else if (type == INTEL_OUTPUT_DP_MST) {
|
||||
struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
|
||||
|
||||
if (intel_dp->is_mst) {
|
||||
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
|
||||
} else
|
||||
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
|
||||
|
||||
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
|
||||
temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
|
||||
} else {
|
||||
WARN(1, "Invalid encoder type %d for pipe %c\n",
|
||||
|
@ -1379,14 +1379,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
|
|||
TRANS_CLK_SEL_DISABLED);
|
||||
}
|
||||
|
||||
static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
|
||||
u32 level, enum port port, int type)
|
||||
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
|
||||
enum port port, uint8_t iboost)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
|
||||
tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
|
||||
if (iboost)
|
||||
tmp |= iboost << BALANCE_LEG_SHIFT(port);
|
||||
else
|
||||
tmp |= BALANCE_LEG_DISABLE(port);
|
||||
I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
|
||||
}
|
||||
|
||||
static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
|
||||
{
|
||||
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
|
||||
enum port port = intel_dig_port->port;
|
||||
int type = encoder->type;
|
||||
const struct ddi_buf_trans *ddi_translations;
|
||||
uint8_t iboost;
|
||||
uint8_t dp_iboost, hdmi_iboost;
|
||||
int n_entries;
|
||||
u32 reg;
|
||||
|
||||
/* VBT may override standard boost values */
|
||||
dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
|
||||
|
@ -1428,16 +1444,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
|
|||
return;
|
||||
}
|
||||
|
||||
reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
|
||||
reg &= ~BALANCE_LEG_MASK(port);
|
||||
reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
|
||||
_skl_ddi_set_iboost(dev_priv, port, iboost);
|
||||
|
||||
if (iboost)
|
||||
reg |= iboost << BALANCE_LEG_SHIFT(port);
|
||||
else
|
||||
reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
|
||||
|
||||
I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
|
||||
if (port == PORT_A && intel_dig_port->max_lanes == 4)
|
||||
_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
|
||||
}
|
||||
|
||||
static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
|
||||
|
@ -1568,7 +1578,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
|
|||
level = translate_signal_level(signal_levels);
|
||||
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
|
||||
skl_ddi_set_iboost(encoder, level);
|
||||
else if (IS_BROXTON(dev_priv))
|
||||
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
|
||||
|
||||
|
@ -1615,8 +1625,6 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
|
|||
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
|
||||
}
|
||||
|
||||
intel_prepare_ddi_buffer(intel_encoder);
|
||||
|
||||
if (type == INTEL_OUTPUT_EDP) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
intel_edp_panel_on(intel_dp);
|
||||
|
@ -1627,6 +1635,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
|
|||
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
intel_prepare_dp_ddi_buffers(intel_encoder);
|
||||
|
||||
intel_dp_set_link_params(intel_dp, crtc->config);
|
||||
|
||||
intel_ddi_init_dp_buf_reg(intel_encoder);
|
||||
|
@ -1637,6 +1647,15 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
|
|||
intel_dp_stop_link_train(intel_dp);
|
||||
} else if (type == INTEL_OUTPUT_HDMI) {
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
|
||||
int level = intel_ddi_hdmi_level(dev_priv, port);
|
||||
|
||||
intel_prepare_hdmi_ddi_buffers(intel_encoder);
|
||||
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
skl_ddi_set_iboost(intel_encoder, level);
|
||||
else if (IS_BROXTON(dev_priv))
|
||||
bxt_ddi_vswing_sequence(dev_priv, level, port,
|
||||
INTEL_OUTPUT_HDMI);
|
||||
|
||||
intel_hdmi->set_infoframes(encoder,
|
||||
crtc->config->has_hdmi_sink,
|
||||
|
@ -2105,7 +2124,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
|
|||
|
||||
val = DP_TP_CTL_ENABLE |
|
||||
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
|
||||
if (intel_dp->is_mst)
|
||||
if (intel_dp->link_mst)
|
||||
val |= DP_TP_CTL_MODE_MST;
|
||||
else {
|
||||
val |= DP_TP_CTL_MODE_SST;
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#include <drm/drm_edid.h>
|
||||
#include <drm/drmP.h>
|
||||
#include "intel_drv.h"
|
||||
#include "intel_frontbuffer.h"
|
||||
#include <drm/i915_drm.h>
|
||||
#include "i915_drv.h"
|
||||
#include "i915_gem_dmabuf.h"
|
||||
|
@ -2465,9 +2466,8 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
|
|||
return false;
|
||||
}
|
||||
|
||||
obj->tiling_mode = plane_config->tiling;
|
||||
if (obj->tiling_mode == I915_TILING_X)
|
||||
obj->stride = fb->pitches[0];
|
||||
if (plane_config->tiling == I915_TILING_X)
|
||||
obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
|
||||
|
||||
mode_cmd.pixel_format = fb->pixel_format;
|
||||
mode_cmd.width = fb->width;
|
||||
|
@ -2488,7 +2488,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
|
|||
return true;
|
||||
|
||||
out_unref_obj:
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return false;
|
||||
}
|
||||
|
@ -2593,14 +2593,15 @@ valid_fb:
|
|||
intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
|
||||
|
||||
obj = intel_fb_obj(fb);
|
||||
if (obj->tiling_mode != I915_TILING_NONE)
|
||||
if (i915_gem_object_is_tiled(obj))
|
||||
dev_priv->preserve_bios_swizzle = true;
|
||||
|
||||
drm_framebuffer_reference(fb);
|
||||
primary->fb = primary->state->fb = fb;
|
||||
primary->crtc = primary->state->crtc = &intel_crtc->base;
|
||||
intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
|
||||
obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
|
||||
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
|
||||
&obj->frontbuffer_bits);
|
||||
}
|
||||
|
||||
static void i9xx_update_primary_plane(struct drm_plane *primary,
|
||||
|
@ -2670,8 +2671,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
|
|||
BUG();
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 4 &&
|
||||
obj->tiling_mode != I915_TILING_NONE)
|
||||
if (INTEL_INFO(dev)->gen >= 4 && i915_gem_object_is_tiled(obj))
|
||||
dspcntr |= DISPPLANE_TILED;
|
||||
|
||||
if (IS_G4X(dev))
|
||||
|
@ -2780,7 +2780,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
|
|||
BUG();
|
||||
}
|
||||
|
||||
if (obj->tiling_mode != I915_TILING_NONE)
|
||||
if (i915_gem_object_is_tiled(obj))
|
||||
dspcntr |= DISPPLANE_TILED;
|
||||
|
||||
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
|
||||
|
@ -4564,12 +4564,11 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
|
|||
struct drm_atomic_state *old_state = old_crtc_state->base.state;
|
||||
struct intel_crtc_state *pipe_config =
|
||||
to_intel_crtc_state(crtc->base.state);
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_plane *primary = crtc->base.primary;
|
||||
struct drm_plane_state *old_pri_state =
|
||||
drm_atomic_get_existing_plane_state(old_state, primary);
|
||||
|
||||
intel_frontbuffer_flip(dev, pipe_config->fb_bits);
|
||||
intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
|
||||
|
||||
crtc->wm.cxsr_allowed = true;
|
||||
|
||||
|
@ -4692,7 +4691,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
|
|||
* to compute the mask of flip planes precisely. For the time being
|
||||
* consider this a flip to a NULL plane.
|
||||
*/
|
||||
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
|
||||
intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
|
||||
}
|
||||
|
||||
static void ironlake_crtc_enable(struct drm_crtc *crtc)
|
||||
|
@ -10434,7 +10433,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
|
|||
|
||||
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
|
||||
if (IS_ERR(fb))
|
||||
drm_gem_object_unreference_unlocked(&obj->base);
|
||||
i915_gem_object_put_unlocked(obj);
|
||||
|
||||
return fb;
|
||||
}
|
||||
|
@ -10945,13 +10944,13 @@ static void intel_unpin_work_fn(struct work_struct *__work)
|
|||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
|
||||
drm_gem_object_unreference(&work->pending_flip_obj->base);
|
||||
|
||||
if (work->flip_queued_req)
|
||||
i915_gem_request_assign(&work->flip_queued_req, NULL);
|
||||
i915_gem_object_put(work->pending_flip_obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
|
||||
i915_gem_request_put(work->flip_queued_req);
|
||||
|
||||
intel_frontbuffer_flip_complete(to_i915(dev),
|
||||
to_intel_plane(primary)->frontbuffer_bit);
|
||||
intel_fbc_post_update(crtc);
|
||||
drm_framebuffer_unreference(work->old_fb);
|
||||
|
||||
|
@ -11116,7 +11115,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_ring *ring = req->ring;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
u32 flip_mask;
|
||||
int ret;
|
||||
|
@ -11132,13 +11131,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
|
|||
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
|
||||
else
|
||||
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
|
||||
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit(engine, MI_DISPLAY_FLIP |
|
||||
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_emit(ring, MI_DISPLAY_FLIP |
|
||||
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
|
||||
intel_ring_emit(engine, fb->pitches[0]);
|
||||
intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
|
||||
intel_ring_emit(engine, 0); /* aux display base address, unused */
|
||||
intel_ring_emit(ring, fb->pitches[0]);
|
||||
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
|
||||
intel_ring_emit(ring, 0); /* aux display base address, unused */
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -11150,7 +11149,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_ring *ring = req->ring;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
u32 flip_mask;
|
||||
int ret;
|
||||
|
@ -11163,13 +11162,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
|
|||
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
|
||||
else
|
||||
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
|
||||
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
|
||||
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
|
||||
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
|
||||
intel_ring_emit(engine, fb->pitches[0]);
|
||||
intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit(ring, fb->pitches[0]);
|
||||
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -11181,7 +11180,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_ring *ring = req->ring;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
uint32_t pf, pipesrc;
|
||||
|
@ -11195,11 +11194,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
|
|||
* Display Registers (which do not change across a page-flip)
|
||||
* so we need only reprogram the base address.
|
||||
*/
|
||||
intel_ring_emit(engine, MI_DISPLAY_FLIP |
|
||||
intel_ring_emit(ring, MI_DISPLAY_FLIP |
|
||||
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
|
||||
intel_ring_emit(engine, fb->pitches[0]);
|
||||
intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
|
||||
obj->tiling_mode);
|
||||
intel_ring_emit(ring, fb->pitches[0]);
|
||||
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
|
||||
i915_gem_object_get_tiling(obj));
|
||||
|
||||
/* XXX Enabling the panel-fitter across page-flip is so far
|
||||
* untested on non-native modes, so ignore it for now.
|
||||
|
@ -11207,7 +11206,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
|
|||
*/
|
||||
pf = 0;
|
||||
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
|
||||
intel_ring_emit(engine, pf | pipesrc);
|
||||
intel_ring_emit(ring, pf | pipesrc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -11219,7 +11218,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_ring *ring = req->ring;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
uint32_t pf, pipesrc;
|
||||
|
@ -11229,10 +11228,10 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(engine, MI_DISPLAY_FLIP |
|
||||
intel_ring_emit(ring, MI_DISPLAY_FLIP |
|
||||
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
|
||||
intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
|
||||
intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
|
||||
intel_ring_emit(ring, fb->pitches[0] | i915_gem_object_get_tiling(obj));
|
||||
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
|
||||
|
||||
/* Contrary to the suggestions in the documentation,
|
||||
* "Enable Panel Fitter" does not seem to be required when page
|
||||
|
@ -11242,7 +11241,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
|
|||
*/
|
||||
pf = 0;
|
||||
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
|
||||
intel_ring_emit(engine, pf | pipesrc);
|
||||
intel_ring_emit(ring, pf | pipesrc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -11254,7 +11253,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
|
|||
struct drm_i915_gem_request *req,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct intel_ring *ring = req->ring;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
uint32_t plane_bit = 0;
|
||||
int len, ret;
|
||||
|
@ -11275,7 +11274,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
|
|||
}
|
||||
|
||||
len = 4;
|
||||
if (engine->id == RCS) {
|
||||
if (req->engine->id == RCS) {
|
||||
len += 6;
|
||||
/*
|
||||
* On Gen 8, SRM is now taking an extra dword to accommodate
|
||||
|
@ -11313,30 +11312,30 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
|
|||
* for the RCS also doesn't appear to drop events. Setting the DERRMR
|
||||
* to zero does lead to lockups within MI_DISPLAY_FLIP.
|
||||
*/
|
||||
if (engine->id == RCS) {
|
||||
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit_reg(engine, DERRMR);
|
||||
intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
|
||||
if (req->engine->id == RCS) {
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
|
||||
intel_ring_emit_reg(ring, DERRMR);
|
||||
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
|
||||
DERRMR_PIPEB_PRI_FLIP_DONE |
|
||||
DERRMR_PIPEC_PRI_FLIP_DONE));
|
||||
if (IS_GEN8(dev))
|
||||
intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
|
||||
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
|
||||
MI_SRM_LRM_GLOBAL_GTT);
|
||||
else
|
||||
intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
|
||||
intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
|
||||
MI_SRM_LRM_GLOBAL_GTT);
|
||||
intel_ring_emit_reg(engine, DERRMR);
|
||||
intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
|
||||
intel_ring_emit_reg(ring, DERRMR);
|
||||
intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256);
|
||||
if (IS_GEN8(dev)) {
|
||||
intel_ring_emit(engine, 0);
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit(ring, 0);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
}
|
||||
}
|
||||
|
||||
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
|
||||
intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
|
||||
intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
|
||||
intel_ring_emit(engine, (MI_NOOP));
|
||||
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
|
||||
intel_ring_emit(ring, fb->pitches[0] | i915_gem_object_get_tiling(obj));
|
||||
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
|
||||
intel_ring_emit(ring, (MI_NOOP));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -11371,7 +11370,8 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
|
|||
if (resv && !reservation_object_test_signaled_rcu(resv, false))
|
||||
return true;
|
||||
|
||||
return engine != i915_gem_request_get_engine(obj->last_write_req);
|
||||
return engine != i915_gem_active_get_engine(&obj->last_write,
|
||||
&obj->base.dev->struct_mutex);
|
||||
}
|
||||
|
||||
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
|
||||
|
@ -11440,7 +11440,7 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
|
|||
|
||||
dspcntr = I915_READ(reg);
|
||||
|
||||
if (obj->tiling_mode != I915_TILING_NONE)
|
||||
if (i915_gem_object_is_tiled(obj))
|
||||
dspcntr |= DISPPLANE_TILED;
|
||||
else
|
||||
dspcntr &= ~DISPPLANE_TILED;
|
||||
|
@ -11463,9 +11463,9 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
|
|||
struct reservation_object *resv;
|
||||
|
||||
if (work->flip_queued_req)
|
||||
WARN_ON(__i915_wait_request(work->flip_queued_req,
|
||||
false, NULL,
|
||||
&dev_priv->rps.mmioflips));
|
||||
WARN_ON(i915_wait_request(work->flip_queued_req,
|
||||
false, NULL,
|
||||
NO_WAITBOOST));
|
||||
|
||||
/* For framebuffer backed by dmabuf, wait for fence */
|
||||
resv = i915_gem_object_get_dmabuf_resv(obj);
|
||||
|
@ -11576,7 +11576,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
struct intel_flip_work *work;
|
||||
struct intel_engine_cs *engine;
|
||||
bool mmio_flip;
|
||||
struct drm_i915_gem_request *request = NULL;
|
||||
struct drm_i915_gem_request *request;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
|
@ -11642,7 +11642,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
|
||||
/* Reference the objects for the scheduled work. */
|
||||
drm_framebuffer_reference(work->old_fb);
|
||||
drm_gem_object_reference(&obj->base);
|
||||
|
||||
crtc->primary->fb = fb;
|
||||
update_state_fb(crtc->primary);
|
||||
|
@ -11650,7 +11649,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
intel_fbc_pre_update(intel_crtc, intel_crtc->config,
|
||||
to_intel_plane_state(primary->state));
|
||||
|
||||
work->pending_flip_obj = obj;
|
||||
work->pending_flip_obj = i915_gem_object_get(obj);
|
||||
|
||||
ret = i915_mutex_lock_interruptible(dev);
|
||||
if (ret)
|
||||
|
@ -11669,13 +11668,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
|
||||
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
|
||||
engine = &dev_priv->engine[BCS];
|
||||
if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
|
||||
if (i915_gem_object_get_tiling(obj) !=
|
||||
i915_gem_object_get_tiling(intel_fb_obj(work->old_fb)))
|
||||
/* vlv: DISPLAY_FLIP fails to change tiling */
|
||||
engine = NULL;
|
||||
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
|
||||
engine = &dev_priv->engine[BCS];
|
||||
} else if (INTEL_INFO(dev)->gen >= 7) {
|
||||
engine = i915_gem_request_get_engine(obj->last_write_req);
|
||||
engine = i915_gem_active_get_engine(&obj->last_write,
|
||||
&obj->base.dev->struct_mutex);
|
||||
if (engine == NULL || engine->id != RCS)
|
||||
engine = &dev_priv->engine[BCS];
|
||||
} else {
|
||||
|
@ -11684,22 +11685,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
|
||||
mmio_flip = use_mmio_flip(engine, obj);
|
||||
|
||||
/* When using CS flips, we want to emit semaphores between rings.
|
||||
* However, when using mmio flips we will create a task to do the
|
||||
* synchronisation, so all we want here is to pin the framebuffer
|
||||
* into the display plane and skip any waits.
|
||||
*/
|
||||
if (!mmio_flip) {
|
||||
ret = i915_gem_object_sync(obj, engine, &request);
|
||||
if (!ret && !request) {
|
||||
request = i915_gem_request_alloc(engine, NULL);
|
||||
ret = PTR_ERR_OR_ZERO(request);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
goto cleanup_pending;
|
||||
}
|
||||
|
||||
ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
|
||||
if (ret)
|
||||
goto cleanup_pending;
|
||||
|
@ -11712,19 +11697,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
if (mmio_flip) {
|
||||
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
|
||||
|
||||
i915_gem_request_assign(&work->flip_queued_req,
|
||||
obj->last_write_req);
|
||||
|
||||
work->flip_queued_req = i915_gem_active_get(&obj->last_write,
|
||||
&obj->base.dev->struct_mutex);
|
||||
schedule_work(&work->mmio_work);
|
||||
} else {
|
||||
i915_gem_request_assign(&work->flip_queued_req, request);
|
||||
request = i915_gem_request_alloc(engine, engine->last_context);
|
||||
if (IS_ERR(request)) {
|
||||
ret = PTR_ERR(request);
|
||||
goto cleanup_unpin;
|
||||
}
|
||||
|
||||
ret = i915_gem_object_sync(obj, request);
|
||||
if (ret)
|
||||
goto cleanup_request;
|
||||
|
||||
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
|
||||
page_flip_flags);
|
||||
if (ret)
|
||||
goto cleanup_unpin;
|
||||
goto cleanup_request;
|
||||
|
||||
intel_mark_page_flip_active(intel_crtc, work);
|
||||
|
||||
work->flip_queued_req = i915_gem_request_get(request);
|
||||
i915_add_request_no_flush(request);
|
||||
}
|
||||
|
||||
|
@ -11732,25 +11726,25 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|||
to_intel_plane(primary)->frontbuffer_bit);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
intel_frontbuffer_flip_prepare(dev,
|
||||
intel_frontbuffer_flip_prepare(to_i915(dev),
|
||||
to_intel_plane(primary)->frontbuffer_bit);
|
||||
|
||||
trace_i915_flip_request(intel_crtc->plane, obj);
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup_request:
|
||||
i915_add_request_no_flush(request);
|
||||
cleanup_unpin:
|
||||
intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
|
||||
cleanup_pending:
|
||||
if (!IS_ERR_OR_NULL(request))
|
||||
i915_add_request_no_flush(request);
|
||||
atomic_dec(&intel_crtc->unpin_work_count);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
cleanup:
|
||||
crtc->primary->fb = old_fb;
|
||||
update_state_fb(crtc->primary);
|
||||
|
||||
drm_gem_object_unreference_unlocked(&obj->base);
|
||||
i915_gem_object_put_unlocked(obj);
|
||||
drm_framebuffer_unreference(work->old_fb);
|
||||
|
||||
spin_lock_irq(&dev->event_lock);
|
||||
|
@ -12298,6 +12292,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
|
|||
struct drm_device *dev = state->dev;
|
||||
struct drm_connector *connector;
|
||||
unsigned int used_ports = 0;
|
||||
unsigned int used_mst_ports = 0;
|
||||
|
||||
/*
|
||||
* Walk the connector list instead of the encoder
|
||||
|
@ -12334,11 +12329,20 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
|
|||
return false;
|
||||
|
||||
used_ports |= port_mask;
|
||||
break;
|
||||
case INTEL_OUTPUT_DP_MST:
|
||||
used_mst_ports |=
|
||||
1 << enc_to_mst(&encoder->base)->primary->port;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* can't mix MST and SST/HDMI on the same port */
|
||||
if (used_ports & used_mst_ports)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -13506,8 +13510,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
|
|||
if (!intel_plane_state->wait_req)
|
||||
continue;
|
||||
|
||||
ret = __i915_wait_request(intel_plane_state->wait_req,
|
||||
true, NULL, NULL);
|
||||
ret = i915_wait_request(intel_plane_state->wait_req,
|
||||
true, NULL, NULL);
|
||||
if (ret) {
|
||||
/* Any hang should be swallowed by the wait */
|
||||
WARN_ON(ret == -EIO);
|
||||
|
@ -13619,8 +13623,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
|
|||
if (!intel_plane_state->wait_req)
|
||||
continue;
|
||||
|
||||
ret = __i915_wait_request(intel_plane_state->wait_req,
|
||||
true, NULL, NULL);
|
||||
ret = i915_wait_request(intel_plane_state->wait_req,
|
||||
true, NULL, NULL);
|
||||
/* EIO should be eaten, and we can't get interrupted in the
|
||||
* worker, and blocking commits have waited already. */
|
||||
WARN_ON(ret);
|
||||
|
@ -13797,19 +13801,12 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state)
|
|||
{
|
||||
struct drm_plane_state *old_plane_state;
|
||||
struct drm_plane *plane;
|
||||
struct drm_i915_gem_object *obj, *old_obj;
|
||||
struct intel_plane *intel_plane;
|
||||
int i;
|
||||
|
||||
mutex_lock(&state->dev->struct_mutex);
|
||||
for_each_plane_in_state(state, plane, old_plane_state, i) {
|
||||
obj = intel_fb_obj(plane->state->fb);
|
||||
old_obj = intel_fb_obj(old_plane_state->fb);
|
||||
intel_plane = to_intel_plane(plane);
|
||||
|
||||
i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
|
||||
}
|
||||
mutex_unlock(&state->dev->struct_mutex);
|
||||
for_each_plane_in_state(state, plane, old_plane_state, i)
|
||||
i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
|
||||
intel_fb_obj(plane->state->fb),
|
||||
to_intel_plane(plane)->frontbuffer_bit);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -14038,11 +14035,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
|
|||
}
|
||||
|
||||
if (ret == 0) {
|
||||
struct intel_plane_state *plane_state =
|
||||
to_intel_plane_state(new_state);
|
||||
|
||||
i915_gem_request_assign(&plane_state->wait_req,
|
||||
obj->last_write_req);
|
||||
to_intel_plane_state(new_state)->wait_req =
|
||||
i915_gem_active_get(&obj->last_write,
|
||||
&obj->base.dev->struct_mutex);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -14063,6 +14058,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
|
|||
{
|
||||
struct drm_device *dev = plane->dev;
|
||||
struct intel_plane_state *old_intel_state;
|
||||
struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
|
||||
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
|
||||
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
|
||||
|
||||
|
@ -14075,6 +14071,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
|
|||
!INTEL_INFO(dev)->cursor_needs_physical))
|
||||
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
|
||||
|
||||
i915_gem_request_assign(&intel_state->wait_req, NULL);
|
||||
i915_gem_request_assign(&old_intel_state->wait_req, NULL);
|
||||
}
|
||||
|
||||
|
@ -14831,7 +14828,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
|
|||
drm_framebuffer_cleanup(fb);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
WARN_ON(!intel_fb->obj->framebuffer_references--);
|
||||
drm_gem_object_unreference(&intel_fb->obj->base);
|
||||
i915_gem_object_put(intel_fb->obj);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
kfree(intel_fb);
|
||||
}
|
||||
|
@ -14920,15 +14917,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
|
|||
if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
|
||||
/* Enforce that fb modifier and tiling mode match, but only for
|
||||
* X-tiled. This is needed for FBC. */
|
||||
if (!!(obj->tiling_mode == I915_TILING_X) !=
|
||||
if (!!(i915_gem_object_get_tiling(obj) == I915_TILING_X) !=
|
||||
!!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
|
||||
DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
if (obj->tiling_mode == I915_TILING_X)
|
||||
if (i915_gem_object_get_tiling(obj) == I915_TILING_X)
|
||||
mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
|
||||
else if (obj->tiling_mode == I915_TILING_Y) {
|
||||
else if (i915_gem_object_get_tiling(obj) == I915_TILING_Y) {
|
||||
DRM_DEBUG("No Y tiling for legacy addfb\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -14972,9 +14969,10 @@ static int intel_framebuffer_init(struct drm_device *dev,
|
|||
}
|
||||
|
||||
if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
|
||||
mode_cmd->pitches[0] != obj->stride) {
|
||||
mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
|
||||
DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
|
||||
mode_cmd->pitches[0], obj->stride);
|
||||
mode_cmd->pitches[0],
|
||||
i915_gem_object_get_stride(obj));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -15068,13 +15066,13 @@ intel_user_framebuffer_create(struct drm_device *dev,
|
|||
struct drm_i915_gem_object *obj;
|
||||
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
|
||||
|
||||
obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
|
||||
if (&obj->base == NULL)
|
||||
obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
|
||||
if (!obj)
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
|
||||
if (IS_ERR(fb))
|
||||
drm_gem_object_unreference_unlocked(&obj->base);
|
||||
i915_gem_object_put_unlocked(obj);
|
||||
|
||||
return fb;
|
||||
}
|
||||
|
@ -15482,7 +15480,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
|
|||
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
|
||||
|
||||
intel_init_clock_gating(dev);
|
||||
intel_enable_gt_powersave(dev_priv);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -1041,10 +1041,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
|
|||
if (WARN_ON(txsize > 20))
|
||||
return -E2BIG;
|
||||
|
||||
WARN_ON(!msg->buffer != !msg->size);
|
||||
|
||||
if (msg->buffer)
|
||||
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
|
||||
else
|
||||
WARN_ON(msg->size);
|
||||
|
||||
ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
|
||||
if (ret > 0) {
|
||||
|
@ -1447,7 +1447,7 @@ intel_dp_max_link_rate(struct intel_dp *intel_dp)
|
|||
if (WARN_ON(len <= 0))
|
||||
return 162000;
|
||||
|
||||
return rates[rate_to_index(0, rates) - 1];
|
||||
return rates[len - 1];
|
||||
}
|
||||
|
||||
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
|
||||
|
@ -1651,6 +1651,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
|
|||
{
|
||||
intel_dp->link_rate = pipe_config->port_clock;
|
||||
intel_dp->lane_count = pipe_config->lane_count;
|
||||
intel_dp->link_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST);
|
||||
}
|
||||
|
||||
static void intel_dp_prepare(struct intel_encoder *encoder)
|
||||
|
@ -3395,20 +3396,94 @@ intel_dp_link_down(struct intel_dp *intel_dp)
|
|||
}
|
||||
|
||||
static bool
|
||||
intel_dp_get_dpcd(struct intel_dp *intel_dp)
|
||||
intel_dp_read_dpcd(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
|
||||
sizeof(intel_dp->dpcd)) < 0)
|
||||
return false; /* aux transfer failed */
|
||||
|
||||
DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
|
||||
|
||||
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
|
||||
return false; /* DPCD not present */
|
||||
return intel_dp->dpcd[DP_DPCD_REV] != 0;
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_edp_init_dpcd(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
|
||||
|
||||
/* this function is meant to be called only once */
|
||||
WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
|
||||
|
||||
if (!intel_dp_read_dpcd(intel_dp))
|
||||
return false;
|
||||
|
||||
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
|
||||
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
|
||||
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
|
||||
|
||||
/* Check if the panel supports PSR */
|
||||
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
|
||||
intel_dp->psr_dpcd,
|
||||
sizeof(intel_dp->psr_dpcd));
|
||||
if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
|
||||
dev_priv->psr.sink_support = true;
|
||||
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
|
||||
}
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 9 &&
|
||||
(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
|
||||
uint8_t frame_sync_cap;
|
||||
|
||||
dev_priv->psr.sink_support = true;
|
||||
drm_dp_dpcd_read(&intel_dp->aux,
|
||||
DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
|
||||
&frame_sync_cap, 1);
|
||||
dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
|
||||
/* PSR2 needs frame sync as well */
|
||||
dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
|
||||
DRM_DEBUG_KMS("PSR2 %s on sink",
|
||||
dev_priv->psr.psr2_support ? "supported" : "not supported");
|
||||
}
|
||||
|
||||
/* Read the eDP Display control capabilities registers */
|
||||
if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
|
||||
drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
|
||||
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) ==
|
||||
sizeof(intel_dp->edp_dpcd)))
|
||||
DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
|
||||
intel_dp->edp_dpcd);
|
||||
|
||||
/* Intermediate frequency support */
|
||||
if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */
|
||||
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
|
||||
int i;
|
||||
|
||||
drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
|
||||
sink_rates, sizeof(sink_rates));
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
|
||||
int val = le16_to_cpu(sink_rates[i]);
|
||||
|
||||
if (val == 0)
|
||||
break;
|
||||
|
||||
/* Value read is in kHz while drm clock is saved in deca-kHz */
|
||||
intel_dp->sink_rates[i] = (val * 200) / 10;
|
||||
}
|
||||
intel_dp->num_sink_rates = i;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
static bool
|
||||
intel_dp_get_dpcd(struct intel_dp *intel_dp)
|
||||
{
|
||||
if (!intel_dp_read_dpcd(intel_dp))
|
||||
return false;
|
||||
|
||||
if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
|
||||
&intel_dp->sink_count, 1) < 0)
|
||||
|
@ -3431,68 +3506,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
|
|||
if (!is_edp(intel_dp) && !intel_dp->sink_count)
|
||||
return false;
|
||||
|
||||
/* Check if the panel supports PSR */
|
||||
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
|
||||
if (is_edp(intel_dp)) {
|
||||
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
|
||||
intel_dp->psr_dpcd,
|
||||
sizeof(intel_dp->psr_dpcd));
|
||||
if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
|
||||
dev_priv->psr.sink_support = true;
|
||||
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 9 &&
|
||||
(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
|
||||
uint8_t frame_sync_cap;
|
||||
|
||||
dev_priv->psr.sink_support = true;
|
||||
drm_dp_dpcd_read(&intel_dp->aux,
|
||||
DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
|
||||
&frame_sync_cap, 1);
|
||||
dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
|
||||
/* PSR2 needs frame sync as well */
|
||||
dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
|
||||
DRM_DEBUG_KMS("PSR2 %s on sink",
|
||||
dev_priv->psr.psr2_support ? "supported" : "not supported");
|
||||
}
|
||||
|
||||
/* Read the eDP Display control capabilities registers */
|
||||
memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
|
||||
if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
|
||||
(drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
|
||||
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
|
||||
sizeof(intel_dp->edp_dpcd)))
|
||||
DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
|
||||
intel_dp->edp_dpcd);
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
|
||||
yesno(intel_dp_source_supports_hbr2(intel_dp)),
|
||||
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
|
||||
|
||||
/* Intermediate frequency support */
|
||||
if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */
|
||||
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
|
||||
int i;
|
||||
|
||||
drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
|
||||
sink_rates, sizeof(sink_rates));
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
|
||||
int val = le16_to_cpu(sink_rates[i]);
|
||||
|
||||
if (val == 0)
|
||||
break;
|
||||
|
||||
/* Value read is in kHz while drm clock is saved in deca-kHz */
|
||||
intel_dp->sink_rates[i] = (val * 200) / 10;
|
||||
}
|
||||
intel_dp->num_sink_rates = i;
|
||||
}
|
||||
|
||||
intel_dp_print_rates(intel_dp);
|
||||
|
||||
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
|
||||
DP_DWN_STRM_PORT_PRESENT))
|
||||
return true; /* native DP sink */
|
||||
|
@@ -3526,7 +3539,7 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
}

static bool
intel_dp_probe_mst(struct intel_dp *intel_dp)
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	u8 buf[1];

@@ -3539,18 +3552,30 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
		return false;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
		if (buf[0] & DP_MST_CAP) {
			DRM_DEBUG_KMS("Sink is MST capable\n");
			intel_dp->is_mst = true;
		} else {
			DRM_DEBUG_KMS("Sink is not MST capable\n");
			intel_dp->is_mst = false;
		}
	}
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
		return false;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	return intel_dp->is_mst;
	return buf[0] & DP_MST_CAP;
}

static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	if (!i915.enable_dp_mst)
		return;

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = intel_dp_can_mst(intel_dp);

	if (intel_dp->is_mst)
		DRM_DEBUG_KMS("Sink is MST capable\n");
	else
		DRM_DEBUG_KMS("Sink is not MST capable\n");

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
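As an aside on the shape of this refactor: intel_dp_can_mst() is now a side-effect-free capability query, while intel_dp_configure_mst() applies policy (the module parameter, the per-port can_mst flag) and commits the result to the topology manager. A minimal standalone sketch of that split, with hypothetical names and none of the i915 types, might look like this:

/*
 * Illustrative sketch only -- hypothetical names, not the i915 functions
 * in the diff above.  A read-only capability query is separated from the
 * configure step that applies policy and commits the chosen mode.
 */
#include <stdbool.h>

struct sink {
	bool can_mst;	/* static capability, filled in at detect time */
	bool is_mst;	/* current mode, committed to the topology manager */
};

static bool sink_query_mst(const struct sink *s)
{
	return s->can_mst;	/* no state is changed here */
}

static void sink_configure_mst(struct sink *s, bool mst_enabled_param)
{
	if (!mst_enabled_param || !s->can_mst)
		return;

	s->is_mst = sink_query_mst(s);
	/* ...commit s->is_mst to the topology manager here... */
}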
@@ -3909,7 +3934,7 @@ static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	u8 sink_irq_vector;
	u8 sink_irq_vector = 0;
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

@@ -3936,7 +3961,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
@@ -3980,6 +4006,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;
@ -4217,8 +4246,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
|
|||
struct drm_device *dev = connector->dev;
|
||||
enum drm_connector_status status;
|
||||
enum intel_display_power_domain power_domain;
|
||||
bool ret;
|
||||
u8 sink_irq_vector;
|
||||
u8 sink_irq_vector = 0;
|
||||
|
||||
power_domain = intel_display_port_aux_power_domain(intel_encoder);
|
||||
intel_display_power_get(to_i915(dev), power_domain);
|
||||
|
@ -4252,10 +4280,17 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
|
|||
if (intel_encoder->type != INTEL_OUTPUT_EDP)
|
||||
intel_encoder->type = INTEL_OUTPUT_DP;
|
||||
|
||||
DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
|
||||
yesno(intel_dp_source_supports_hbr2(intel_dp)),
|
||||
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
|
||||
|
||||
intel_dp_print_rates(intel_dp);
|
||||
|
||||
intel_dp_probe_oui(intel_dp);
|
||||
|
||||
ret = intel_dp_probe_mst(intel_dp);
|
||||
if (ret) {
|
||||
intel_dp_configure_mst(intel_dp);
|
||||
|
||||
if (intel_dp->is_mst) {
|
||||
/*
|
||||
* If we are in MST mode then this connector
|
||||
* won't appear connected or have anything
|
||||
|
@ -4290,7 +4325,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
|
|||
|
||||
/* Try to read the source of the interrupt */
|
||||
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
|
||||
intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
|
||||
intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
|
||||
sink_irq_vector != 0) {
|
||||
/* Clear interrupt source */
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux,
|
||||
DP_DEVICE_SERVICE_IRQ_VECTOR,
|
||||
|
@@ -5186,7 +5222,7 @@ unlock:

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called everytime rendering on the given planes start.
@@ -5194,10 +5230,9 @@ unlock:
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
			       unsigned frontbuffer_bits)
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	enum pipe pipe;

@@ -5229,7 +5264,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,

/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
@@ -5239,10 +5274,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
			  unsigned frontbuffer_bits)
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	enum pipe pipe;

@ -5413,14 +5447,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
|
|||
pps_unlock(intel_dp);
|
||||
|
||||
/* Cache DPCD and EDID for edp. */
|
||||
has_dpcd = intel_dp_get_dpcd(intel_dp);
|
||||
has_dpcd = intel_edp_init_dpcd(intel_dp);
|
||||
|
||||
if (has_dpcd) {
|
||||
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
|
||||
dev_priv->no_aux_handshake =
|
||||
intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
|
||||
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
|
||||
} else {
|
||||
if (!has_dpcd) {
|
||||
/* if this fails, presume the device is a ghost */
|
||||
DRM_INFO("failed to retrieve link info, disabling eDP\n");
|
||||
goto out_vdd_off;
|
||||
|
|
|
@ -170,10 +170,10 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
|
|||
intel_mst->connector = found;
|
||||
|
||||
if (intel_dp->active_mst_links == 0) {
|
||||
intel_prepare_ddi_buffer(&intel_dig_port->base);
|
||||
|
||||
intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
|
||||
|
||||
intel_prepare_dp_ddi_buffers(&intel_dig_port->base);
|
||||
|
||||
intel_dp_set_link_params(intel_dp, intel_crtc->config);
|
||||
|
||||
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
|
||||
|
|
|
@ -849,6 +849,7 @@ struct intel_dp {
|
|||
int link_rate;
|
||||
uint8_t lane_count;
|
||||
uint8_t sink_count;
|
||||
bool link_mst;
|
||||
bool has_audio;
|
||||
bool detect_done;
|
||||
enum hdmi_force_audio force_audio;
|
||||
|
@ -1104,7 +1105,7 @@ void intel_crt_reset(struct drm_encoder *encoder);
|
|||
/* intel_ddi.c */
|
||||
void intel_ddi_clk_select(struct intel_encoder *encoder,
|
||||
const struct intel_crtc_state *pipe_config);
|
||||
void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
|
||||
void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
|
||||
void hsw_fdi_link_train(struct drm_crtc *crtc);
|
||||
void intel_ddi_init(struct drm_device *dev, enum port port);
|
||||
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
|
||||
|
@ -1131,21 +1132,10 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
|
|||
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
|
||||
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
|
||||
|
||||
/* intel_frontbuffer.c */
|
||||
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
||||
enum fb_op_origin origin);
|
||||
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits);
|
||||
void intel_frontbuffer_flip_complete(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits);
|
||||
void intel_frontbuffer_flip(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits);
|
||||
unsigned int intel_fb_align_height(struct drm_device *dev,
|
||||
unsigned int height,
|
||||
uint32_t pixel_format,
|
||||
uint64_t fb_format_modifier);
|
||||
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
|
||||
enum fb_op_origin origin);
|
||||
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
|
||||
uint64_t fb_modifier, uint32_t pixel_format);
|
||||
|
||||
|
@ -1381,11 +1371,12 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
|
|||
void intel_plane_destroy(struct drm_plane *plane);
|
||||
void intel_edp_drrs_enable(struct intel_dp *intel_dp);
|
||||
void intel_edp_drrs_disable(struct intel_dp *intel_dp);
|
||||
void intel_edp_drrs_invalidate(struct drm_device *dev,
|
||||
unsigned frontbuffer_bits);
|
||||
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
|
||||
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
|
||||
unsigned int frontbuffer_bits);
|
||||
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
|
||||
unsigned int frontbuffer_bits);
|
||||
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
|
||||
struct intel_digital_port *port);
|
||||
struct intel_digital_port *port);
|
||||
|
||||
void
|
||||
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
|
||||
|
@ -1558,13 +1549,13 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
|
|||
/* intel_psr.c */
|
||||
void intel_psr_enable(struct intel_dp *intel_dp);
|
||||
void intel_psr_disable(struct intel_dp *intel_dp);
|
||||
void intel_psr_invalidate(struct drm_device *dev,
|
||||
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits);
|
||||
void intel_psr_flush(struct drm_device *dev,
|
||||
void intel_psr_flush(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits,
|
||||
enum fb_op_origin origin);
|
||||
void intel_psr_init(struct drm_device *dev);
|
||||
void intel_psr_single_frame_update(struct drm_device *dev,
|
||||
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits);
|
||||
|
||||
/* intel_runtime_pm.c */
|
||||
|
@ -1664,13 +1655,6 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
|
|||
atomic_dec(&dev_priv->pm.wakeref_count);
|
||||
}
|
||||
|
||||
/* TODO: convert users of these to rely instead on proper RPM refcounting */
|
||||
#define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv) \
|
||||
disable_rpm_wakeref_asserts(dev_priv)
|
||||
|
||||
#define ENABLE_RPM_WAKEREF_ASSERTS(dev_priv) \
|
||||
enable_rpm_wakeref_asserts(dev_priv)
|
||||
|
||||
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
|
||||
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
|
||||
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
|
||||
|
@ -1696,11 +1680,11 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
|
|||
void intel_gpu_ips_teardown(void);
|
||||
void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
|
||||
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
|
||||
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
|
||||
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
|
||||
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
|
||||
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
|
||||
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
|
||||
void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
|
||||
void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
|
||||
void gen6_rps_busy(struct drm_i915_private *dev_priv);
|
||||
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
|
||||
void gen6_rps_idle(struct drm_i915_private *dev_priv);
|
||||
|
|
231
drivers/gpu/drm/i915/intel_engine_cs.c
Normal file
@@ -0,0 +1,231 @@
|
|||
/*
|
||||
* Copyright © 2016 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "intel_ringbuffer.h"
|
||||
#include "intel_lrc.h"
|
||||
|
||||
static const struct engine_info {
|
||||
const char *name;
|
||||
unsigned exec_id;
|
||||
unsigned guc_id;
|
||||
u32 mmio_base;
|
||||
unsigned irq_shift;
|
||||
int (*init_legacy)(struct intel_engine_cs *engine);
|
||||
int (*init_execlists)(struct intel_engine_cs *engine);
|
||||
} intel_engines[] = {
|
||||
[RCS] = {
|
||||
.name = "render ring",
|
||||
.exec_id = I915_EXEC_RENDER,
|
||||
.guc_id = GUC_RENDER_ENGINE,
|
||||
.mmio_base = RENDER_RING_BASE,
|
||||
.irq_shift = GEN8_RCS_IRQ_SHIFT,
|
||||
.init_execlists = logical_render_ring_init,
|
||||
.init_legacy = intel_init_render_ring_buffer,
|
||||
},
|
||||
[BCS] = {
|
||||
.name = "blitter ring",
|
||||
.exec_id = I915_EXEC_BLT,
|
||||
.guc_id = GUC_BLITTER_ENGINE,
|
||||
.mmio_base = BLT_RING_BASE,
|
||||
.irq_shift = GEN8_BCS_IRQ_SHIFT,
|
||||
.init_execlists = logical_xcs_ring_init,
|
||||
.init_legacy = intel_init_blt_ring_buffer,
|
||||
},
|
||||
[VCS] = {
|
||||
.name = "bsd ring",
|
||||
.exec_id = I915_EXEC_BSD,
|
||||
.guc_id = GUC_VIDEO_ENGINE,
|
||||
.mmio_base = GEN6_BSD_RING_BASE,
|
||||
.irq_shift = GEN8_VCS1_IRQ_SHIFT,
|
||||
.init_execlists = logical_xcs_ring_init,
|
||||
.init_legacy = intel_init_bsd_ring_buffer,
|
||||
},
|
||||
[VCS2] = {
|
||||
.name = "bsd2 ring",
|
||||
.exec_id = I915_EXEC_BSD,
|
||||
.guc_id = GUC_VIDEO_ENGINE2,
|
||||
.mmio_base = GEN8_BSD2_RING_BASE,
|
||||
.irq_shift = GEN8_VCS2_IRQ_SHIFT,
|
||||
.init_execlists = logical_xcs_ring_init,
|
||||
.init_legacy = intel_init_bsd2_ring_buffer,
|
||||
},
|
||||
[VECS] = {
|
||||
.name = "video enhancement ring",
|
||||
.exec_id = I915_EXEC_VEBOX,
|
||||
.guc_id = GUC_VIDEOENHANCE_ENGINE,
|
||||
.mmio_base = VEBOX_RING_BASE,
|
||||
.irq_shift = GEN8_VECS_IRQ_SHIFT,
|
||||
.init_execlists = logical_xcs_ring_init,
|
||||
.init_legacy = intel_init_vebox_ring_buffer,
|
||||
},
|
||||
};
|
||||
|
||||
static struct intel_engine_cs *
|
||||
intel_engine_setup(struct drm_i915_private *dev_priv,
|
||||
enum intel_engine_id id)
|
||||
{
|
||||
const struct engine_info *info = &intel_engines[id];
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[id];
|
||||
|
||||
engine->id = id;
|
||||
engine->i915 = dev_priv;
|
||||
engine->name = info->name;
|
||||
engine->exec_id = info->exec_id;
|
||||
engine->hw_id = engine->guc_id = info->guc_id;
|
||||
engine->mmio_base = info->mmio_base;
|
||||
engine->irq_shift = info->irq_shift;
|
||||
|
||||
return engine;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_engines_init() - allocate, populate and init the Engine Command Streamers
|
||||
* @dev: DRM device.
|
||||
*
|
||||
* Return: non-zero if the initialization failed.
|
||||
*/
|
||||
int intel_engines_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
unsigned int mask = 0;
|
||||
int (*init)(struct intel_engine_cs *engine);
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
|
||||
WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
|
||||
GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
|
||||
if (!HAS_ENGINE(dev_priv, i))
|
||||
continue;
|
||||
|
||||
if (i915.enable_execlists)
|
||||
init = intel_engines[i].init_execlists;
|
||||
else
|
||||
init = intel_engines[i].init_legacy;
|
||||
|
||||
if (!init)
|
||||
continue;
|
||||
|
||||
ret = init(intel_engine_setup(dev_priv, i));
|
||||
if (ret)
|
||||
goto cleanup;
|
||||
|
||||
mask |= ENGINE_MASK(i);
|
||||
}
|
||||
|
||||
/*
|
||||
* Catch failures to update intel_engines table when the new engines
|
||||
* are added to the driver by a warning and disabling the forgotten
|
||||
* engines.
|
||||
*/
|
||||
if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
|
||||
struct intel_device_info *info =
|
||||
(struct intel_device_info *)&dev_priv->info;
|
||||
info->ring_mask = mask;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
if (i915.enable_execlists)
|
||||
intel_logical_ring_cleanup(&dev_priv->engine[i]);
|
||||
else
|
||||
intel_engine_cleanup(&dev_priv->engine[i]);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
|
||||
{
|
||||
memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
|
||||
}
|
||||
|
||||
static void intel_engine_init_requests(struct intel_engine_cs *engine)
|
||||
{
|
||||
init_request_active(&engine->last_request, NULL);
|
||||
INIT_LIST_HEAD(&engine->request_list);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_engines_setup_common - setup engine state not requiring hw access
|
||||
* @engine: Engine to setup.
|
||||
*
|
||||
* Initializes @engine@ structure members shared between legacy and execlists
|
||||
* submission modes which do not require hardware access.
|
||||
*
|
||||
* Typically done early in the submission mode specific engine setup stage.
|
||||
*/
|
||||
void intel_engine_setup_common(struct intel_engine_cs *engine)
|
||||
{
|
||||
INIT_LIST_HEAD(&engine->buffers);
|
||||
INIT_LIST_HEAD(&engine->execlist_queue);
|
||||
spin_lock_init(&engine->execlist_lock);
|
||||
|
||||
engine->fence_context = fence_context_alloc(1);
|
||||
|
||||
intel_engine_init_requests(engine);
|
||||
intel_engine_init_hangcheck(engine);
|
||||
i915_gem_batch_pool_init(engine, &engine->batch_pool);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_engines_init_common - initialize cengine state which might require hw access
|
||||
* @engine: Engine to initialize.
|
||||
*
|
||||
* Initializes @engine@ structure members shared between legacy and execlists
|
||||
* submission modes which do require hardware access.
|
||||
*
|
||||
* Typcally done at later stages of submission mode specific engine setup.
|
||||
*
|
||||
* Returns zero on success or an error code on failure.
|
||||
*/
|
||||
int intel_engine_init_common(struct intel_engine_cs *engine)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = intel_engine_init_breadcrumbs(engine);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return intel_engine_init_cmd_parser(engine);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_engines_cleanup_common - cleans up the engine state created by
|
||||
* the common initiailizers.
|
||||
* @engine: Engine to cleanup.
|
||||
*
|
||||
* This cleans up everything created by the common helpers.
|
||||
*/
|
||||
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
|
||||
{
|
||||
intel_engine_cleanup_cmd_parser(engine);
|
||||
intel_engine_fini_breadcrumbs(engine);
|
||||
i915_gem_batch_pool_fini(&engine->batch_pool);
|
||||
}
|
|
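The new intel_engine_cs.c above centres on a static engine descriptor table indexed by engine id, with the init loop skipping engines the hardware does not expose and masking out any entry that is missing or fails to initialise. A rough standalone sketch of that table-driven pattern (hypothetical names, and simplified error handling compared to the real cleanup path):

/*
 * Sketch of a table-driven engine init loop; the names here are
 * illustrative and are not the i915 API from the diff above.
 */
#include <stdio.h>

enum eng_id { ENG_A, ENG_B, NUM_ENG };

struct eng_desc {
	const char *name;
	int (*init)(enum eng_id id);	/* NULL means "not wired up" */
};

static int init_a(enum eng_id id) { printf("init engine %d\n", id); return 0; }

static const struct eng_desc table[NUM_ENG] = {
	[ENG_A] = { .name = "a", .init = init_a },
	[ENG_B] = { .name = "b", .init = NULL },
};

/* Returns a bitmask of the engines that actually came up. */
static unsigned int engines_init(unsigned int present_mask)
{
	unsigned int live = 0;

	for (int i = 0; i < NUM_ENG; i++) {
		if (!(present_mask & (1u << i)) || !table[i].init)
			continue;
		if (table[i].init(i) == 0)
			live |= 1u << i;
	}
	return live;	/* caller compares against present_mask, as i915 does */
}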
@ -741,7 +741,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
|
|||
cache->fb.pixel_format = fb->pixel_format;
|
||||
cache->fb.stride = fb->pitches[0];
|
||||
cache->fb.fence_reg = obj->fence_reg;
|
||||
cache->fb.tiling_mode = obj->tiling_mode;
|
||||
cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
|
||||
}
|
||||
|
||||
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
|
||||
|
@ -1075,6 +1075,8 @@ out:
|
|||
/**
|
||||
* intel_fbc_enable: tries to enable FBC on the CRTC
|
||||
* @crtc: the CRTC
|
||||
* @crtc_state: corresponding &drm_crtc_state for @crtc
|
||||
* @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
|
||||
*
|
||||
* This function checks if the given CRTC was chosen for FBC, then enables it if
|
||||
* possible. Notice that it doesn't activate FBC. It is valid to call
|
||||
|
@ -1163,11 +1165,8 @@ void intel_fbc_disable(struct intel_crtc *crtc)
|
|||
return;
|
||||
|
||||
mutex_lock(&fbc->lock);
|
||||
if (fbc->crtc == crtc) {
|
||||
WARN_ON(!fbc->enabled);
|
||||
WARN_ON(fbc->active);
|
||||
if (fbc->crtc == crtc)
|
||||
__intel_fbc_disable(dev_priv);
|
||||
}
|
||||
mutex_unlock(&fbc->lock);
|
||||
|
||||
cancel_work_sync(&fbc->work.work);
|
||||
|
@ -1230,12 +1229,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
|
|||
if (i915.enable_fbc >= 0)
|
||||
return !!i915.enable_fbc;
|
||||
|
||||
if (!HAS_FBC(dev_priv))
|
||||
return 0;
|
||||
|
||||
if (IS_BROADWELL(dev_priv))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
#ifdef CONFIG_INTEL_IOMMU
|
||||
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
|
||||
if (intel_iommu_gfx_mapped &&
|
||||
(IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
|
||||
DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_fbc_init - Initialize FBC
|
||||
* @dev_priv: the i915 device
|
||||
|
@ -1253,6 +1269,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
|
|||
fbc->active = false;
|
||||
fbc->work.scheduled = false;
|
||||
|
||||
if (need_fbc_vtd_wa(dev_priv))
|
||||
mkwrite_device_info(dev_priv)->has_fbc = false;
|
||||
|
||||
i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
|
||||
DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
|
||||
|
||||
|
|
|
@ -41,6 +41,7 @@
|
|||
#include <drm/drm_crtc.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include "intel_drv.h"
|
||||
#include "intel_frontbuffer.h"
|
||||
#include <drm/i915_drm.h>
|
||||
#include "i915_drv.h"
|
||||
|
||||
|
@ -158,7 +159,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
|
|||
|
||||
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
|
||||
if (IS_ERR(fb)) {
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
ret = PTR_ERR(fb);
|
||||
goto out;
|
||||
}
|
||||
|
@ -188,7 +189,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
|
|||
struct i915_vma *vma;
|
||||
struct drm_i915_gem_object *obj;
|
||||
bool prealloc = false;
|
||||
void *vaddr;
|
||||
void __iomem *vaddr;
|
||||
int ret;
|
||||
|
||||
if (intel_fb &&
|
||||
|
@ -767,7 +768,7 @@ void intel_fbdev_fini(struct drm_device *dev)
|
|||
if (!ifbdev)
|
||||
return;
|
||||
|
||||
flush_work(&dev_priv->fbdev_suspend_work);
|
||||
cancel_work_sync(&dev_priv->fbdev_suspend_work);
|
||||
if (!current_is_async())
|
||||
intel_fbdev_sync(ifbdev);
|
||||
|
||||
|
|
|
@ -63,47 +63,30 @@
|
|||
#include <drm/drmP.h>
|
||||
|
||||
#include "intel_drv.h"
|
||||
#include "intel_frontbuffer.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
/**
|
||||
* intel_fb_obj_invalidate - invalidate frontbuffer object
|
||||
* @obj: GEM object to invalidate
|
||||
* @origin: which operation caused the invalidation
|
||||
*
|
||||
* This function gets called every time rendering on the given object starts and
|
||||
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
|
||||
* be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
|
||||
* until the rendering completes or a flip on this frontbuffer plane is
|
||||
* scheduled.
|
||||
*/
|
||||
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
||||
enum fb_op_origin origin)
|
||||
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
||||
enum fb_op_origin origin,
|
||||
unsigned int frontbuffer_bits)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
|
||||
if (!obj->frontbuffer_bits)
|
||||
return;
|
||||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||
|
||||
if (origin == ORIGIN_CS) {
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
dev_priv->fb_tracking.busy_bits
|
||||
|= obj->frontbuffer_bits;
|
||||
dev_priv->fb_tracking.flip_bits
|
||||
&= ~obj->frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
spin_lock(&dev_priv->fb_tracking.lock);
|
||||
dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
|
||||
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
|
||||
spin_unlock(&dev_priv->fb_tracking.lock);
|
||||
}
|
||||
|
||||
intel_psr_invalidate(dev, obj->frontbuffer_bits);
|
||||
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
|
||||
intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
|
||||
intel_psr_invalidate(dev_priv, frontbuffer_bits);
|
||||
intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
|
||||
intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_frontbuffer_flush - flush frontbuffer
|
||||
* @dev: DRM device
|
||||
* @dev_priv: i915 device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
* @origin: which operation caused the flush
|
||||
*
|
||||
|
@ -113,64 +96,45 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
|||
*
|
||||
* Can be called without any locks held.
|
||||
*/
|
||||
static void intel_frontbuffer_flush(struct drm_device *dev,
|
||||
static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits,
|
||||
enum fb_op_origin origin)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
/* Delay flushing when rings are still busy.*/
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
spin_lock(&dev_priv->fb_tracking.lock);
|
||||
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
spin_unlock(&dev_priv->fb_tracking.lock);
|
||||
|
||||
if (!frontbuffer_bits)
|
||||
return;
|
||||
|
||||
intel_edp_drrs_flush(dev, frontbuffer_bits);
|
||||
intel_psr_flush(dev, frontbuffer_bits, origin);
|
||||
intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
|
||||
intel_psr_flush(dev_priv, frontbuffer_bits, origin);
|
||||
intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_fb_obj_flush - flush frontbuffer object
|
||||
* @obj: GEM object to flush
|
||||
* @retire: set when retiring asynchronous rendering
|
||||
* @origin: which operation caused the flush
|
||||
*
|
||||
* This function gets called every time rendering on the given object has
|
||||
* completed and frontbuffer caching can be started again. If @retire is true
|
||||
* then any delayed flushes will be unblocked.
|
||||
*/
|
||||
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
|
||||
bool retire, enum fb_op_origin origin)
|
||||
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
|
||||
bool retire,
|
||||
enum fb_op_origin origin,
|
||||
unsigned int frontbuffer_bits)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
unsigned frontbuffer_bits;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
|
||||
if (!obj->frontbuffer_bits)
|
||||
return;
|
||||
|
||||
frontbuffer_bits = obj->frontbuffer_bits;
|
||||
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
|
||||
|
||||
if (retire) {
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
spin_lock(&dev_priv->fb_tracking.lock);
|
||||
/* Filter out new bits since rendering started. */
|
||||
frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
|
||||
|
||||
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
spin_unlock(&dev_priv->fb_tracking.lock);
|
||||
}
|
||||
|
||||
intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
|
||||
if (frontbuffer_bits)
|
||||
intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
|
||||
* @dev: DRM device
|
||||
* @dev_priv: i915 device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* This function gets called after scheduling a flip on @obj. The actual
|
||||
|
@ -180,23 +144,21 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
|
|||
*
|
||||
* Can be called without any locks held.
|
||||
*/
|
||||
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
|
||||
void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
spin_lock(&dev_priv->fb_tracking.lock);
|
||||
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
|
||||
/* Remove stale busy bits due to the old buffer. */
|
||||
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
spin_unlock(&dev_priv->fb_tracking.lock);
|
||||
|
||||
intel_psr_single_frame_update(dev, frontbuffer_bits);
|
||||
intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
|
||||
* @dev: DRM device
|
||||
* @dev_priv: i915 device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* This function gets called after the flip has been latched and will complete
|
||||
|
@ -204,23 +166,23 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
|
|||
*
|
||||
* Can be called without any locks held.
|
||||
*/
|
||||
void intel_frontbuffer_flip_complete(struct drm_device *dev,
|
||||
void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
spin_lock(&dev_priv->fb_tracking.lock);
|
||||
/* Mask any cancelled flips. */
|
||||
frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
|
||||
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
spin_unlock(&dev_priv->fb_tracking.lock);
|
||||
|
||||
intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
|
||||
if (frontbuffer_bits)
|
||||
intel_frontbuffer_flush(dev_priv,
|
||||
frontbuffer_bits, ORIGIN_FLIP);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_frontbuffer_flip - synchronous frontbuffer flip
|
||||
* @dev: DRM device
|
||||
* @dev_priv: i915 device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* This function gets called after scheduling a flip on @obj. This is for
|
||||
|
@ -229,15 +191,13 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
|
|||
*
|
||||
* Can be called without any locks held.
|
||||
*/
|
||||
void intel_frontbuffer_flip(struct drm_device *dev,
|
||||
void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
||||
mutex_lock(&dev_priv->fb_tracking.lock);
|
||||
spin_lock(&dev_priv->fb_tracking.lock);
|
||||
/* Remove stale busy bits due to the old buffer. */
|
||||
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
|
||||
mutex_unlock(&dev_priv->fb_tracking.lock);
|
||||
spin_unlock(&dev_priv->fb_tracking.lock);
|
||||
|
||||
intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
|
||||
intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
|
||||
}
|
||||
|
|
91
drivers/gpu/drm/i915/intel_frontbuffer.h
Normal file
@@ -0,0 +1,91 @@
|
|||
/*
|
||||
* Copyright (c) 2014-2016 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __INTEL_FRONTBUFFER_H__
|
||||
#define __INTEL_FRONTBUFFER_H__
|
||||
|
||||
struct drm_i915_private;
|
||||
struct drm_i915_gem_object;
|
||||
|
||||
void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits);
|
||||
void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits);
|
||||
void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits);
|
||||
|
||||
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
||||
enum fb_op_origin origin,
|
||||
unsigned int frontbuffer_bits);
|
||||
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
|
||||
bool retire,
|
||||
enum fb_op_origin origin,
|
||||
unsigned int frontbuffer_bits);
|
||||
|
||||
/**
|
||||
* intel_fb_obj_invalidate - invalidate frontbuffer object
|
||||
* @obj: GEM object to invalidate
|
||||
* @origin: which operation caused the invalidation
|
||||
*
|
||||
* This function gets called every time rendering on the given object starts and
|
||||
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
|
||||
* be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
|
||||
* until the rendering completes or a flip on this frontbuffer plane is
|
||||
* scheduled.
|
||||
*/
|
||||
static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
|
||||
enum fb_op_origin origin)
|
||||
{
|
||||
unsigned int frontbuffer_bits;
|
||||
|
||||
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
|
||||
if (!frontbuffer_bits)
|
||||
return;
|
||||
|
||||
__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_fb_obj_flush - flush frontbuffer object
|
||||
* @obj: GEM object to flush
|
||||
* @retire: set when retiring asynchronous rendering
|
||||
* @origin: which operation caused the flush
|
||||
*
|
||||
* This function gets called every time rendering on the given object has
|
||||
* completed and frontbuffer caching can be started again. If @retire is true
|
||||
* then any delayed flushes will be unblocked.
|
||||
*/
|
||||
static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
|
||||
bool retire,
|
||||
enum fb_op_origin origin)
|
||||
{
|
||||
unsigned int frontbuffer_bits;
|
||||
|
||||
frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
|
||||
if (!frontbuffer_bits)
|
||||
return;
|
||||
|
||||
__intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits);
|
||||
}
|
||||
|
||||
#endif /* __INTEL_FRONTBUFFER_H__ */
|
|
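The header above keeps intel_fb_obj_invalidate()/intel_fb_obj_flush() as static inlines so the common case, an object with no frontbuffer bits, costs only an atomic_read before bailing out, while the heavyweight __intel_fb_obj_* work stays out of line. A minimal sketch of that fast-path idiom, using hypothetical names and C11 atomics rather than the kernel's atomic_t:

/*
 * Illustrative only: an inline fast path that reads an atomic bitmask and
 * returns early, deferring to an out-of-line slow path otherwise.
 */
#include <stdatomic.h>

struct tracked_obj {
	atomic_uint frontbuffer_bits;
};

/* out-of-line slow path, analogous to the __intel_fb_obj_* helpers */
void frontbuffer_invalidate_slow(struct tracked_obj *obj, unsigned int bits);

static inline void frontbuffer_invalidate(struct tracked_obj *obj)
{
	unsigned int bits = atomic_load(&obj->frontbuffer_bits);

	if (!bits)	/* common case: object is not a frontbuffer */
		return;	/* skip the locked slow path entirely */

	frontbuffer_invalidate_slow(obj, bits);
}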
@ -160,7 +160,6 @@ extern int intel_guc_resume(struct drm_device *dev);
|
|||
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
|
||||
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
|
||||
int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
|
||||
int i915_guc_submit(struct drm_i915_gem_request *rq);
|
||||
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
|
||||
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
|
||||
|
||||
|
|
|
@ -323,7 +323,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
|
|||
return ret;
|
||||
}
|
||||
|
||||
ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
|
||||
ret = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
|
||||
if (ret) {
|
||||
DRM_DEBUG_DRIVER("pin failed %d\n", ret);
|
||||
return ret;
|
||||
|
@ -349,7 +349,9 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
|
|||
}
|
||||
|
||||
/* WaC6DisallowByGfxPause*/
|
||||
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
|
||||
if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) ||
|
||||
IS_BXT_REVID(dev, 0, BXT_REVID_B0))
|
||||
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
|
||||
|
||||
if (IS_BROXTON(dev))
|
||||
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
|
||||
|
@ -662,7 +664,7 @@ fail:
|
|||
mutex_lock(&dev->struct_mutex);
|
||||
obj = guc_fw->guc_fw_obj;
|
||||
if (obj)
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
guc_fw->guc_fw_obj = NULL;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
|
@ -743,7 +745,7 @@ void intel_guc_fini(struct drm_device *dev)
|
|||
i915_guc_submission_fini(dev_priv);
|
||||
|
||||
if (guc_fw->guc_fw_obj)
|
||||
drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
|
||||
i915_gem_object_put(guc_fw->guc_fw_obj);
|
||||
guc_fw->guc_fw_obj = NULL;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
|
|
|
@ -525,7 +525,6 @@ void i915_hpd_poll_init_work(struct work_struct *work) {
|
|||
/**
|
||||
* intel_hpd_poll_init - enables/disables polling for connectors with hpd
|
||||
* @dev_priv: i915 device instance
|
||||
* @enabled: Whether to enable or disable polling
|
||||
*
|
||||
* This function enables polling for all connectors, regardless of whether or
|
||||
* not they support hotplug detection. Under certain conditions HPD may not be
|
||||
|
|
File diff suppressed because it is too large
|
@ -29,17 +29,17 @@
|
|||
#define GEN8_LR_CONTEXT_ALIGN 4096
|
||||
|
||||
/* Execlists regs */
|
||||
#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
|
||||
#define RING_EXECLIST_STATUS_LO(ring) _MMIO((ring)->mmio_base + 0x234)
|
||||
#define RING_EXECLIST_STATUS_HI(ring) _MMIO((ring)->mmio_base + 0x234 + 4)
|
||||
#define RING_CONTEXT_CONTROL(ring) _MMIO((ring)->mmio_base + 0x244)
|
||||
#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
|
||||
#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234)
|
||||
#define RING_EXECLIST_STATUS_HI(engine) _MMIO((engine)->mmio_base + 0x234 + 4)
|
||||
#define RING_CONTEXT_CONTROL(engine) _MMIO((engine)->mmio_base + 0x244)
|
||||
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
|
||||
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
|
||||
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
|
||||
#define RING_CONTEXT_STATUS_BUF_BASE(ring) _MMIO((ring)->mmio_base + 0x370)
|
||||
#define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
|
||||
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
|
||||
#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
|
||||
#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370)
|
||||
#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
|
||||
#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
|
||||
#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0)
|
||||
|
||||
/* The docs specify that the write pointer wraps around after 5h, "After status
|
||||
* is written out to the last available status QW at offset 5h, this pointer
|
||||
|
@ -67,35 +67,10 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
|
|||
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
|
||||
void intel_logical_ring_stop(struct intel_engine_cs *engine);
|
||||
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
|
||||
int intel_logical_rings_init(struct drm_device *dev);
|
||||
int logical_render_ring_init(struct intel_engine_cs *engine);
|
||||
int logical_xcs_ring_init(struct intel_engine_cs *engine);
|
||||
|
||||
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
|
||||
/**
|
||||
* intel_logical_ring_advance() - advance the ringbuffer tail
|
||||
* @ringbuf: Ringbuffer to advance.
|
||||
*
|
||||
* The tail is only updated in our logical ringbuffer struct.
|
||||
*/
|
||||
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
|
||||
{
|
||||
ringbuf->tail &= ringbuf->size - 1;
|
||||
}
|
||||
/**
|
||||
* intel_logical_ring_emit() - write a DWORD to the ringbuffer.
|
||||
* @ringbuf: Ringbuffer to write to.
|
||||
* @data: DWORD to write.
|
||||
*/
|
||||
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
|
||||
u32 data)
|
||||
{
|
||||
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
|
||||
ringbuf->tail += 4;
|
||||
}
|
||||
static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
|
||||
i915_reg_t reg)
|
||||
{
|
||||
intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
|
||||
}
|
||||
int intel_engines_init(struct drm_device *dev);
|
||||
|
||||
/* Logical Ring Contexts */
|
||||
|
||||
|
@ -120,10 +95,7 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
|
|||
/* Execlists */
|
||||
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
|
||||
int enable_execlists);
|
||||
struct i915_execbuffer_params;
|
||||
int intel_execlists_submission(struct i915_execbuffer_params *params,
|
||||
struct drm_i915_gem_execbuffer2 *args,
|
||||
struct list_head *vmas);
|
||||
void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
|
||||
|
||||
void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
|
||||
|
||||
|
|
|
@ -97,7 +97,8 @@ struct drm_i915_mocs_table {
|
|||
* end.
|
||||
*/
|
||||
static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
|
||||
{ /* 0x00000009 */
|
||||
[I915_MOCS_UNCACHED] = {
|
||||
/* 0x00000009 */
|
||||
.control_value = LE_CACHEABILITY(LE_UC) |
|
||||
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
|
||||
LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
|
||||
|
@ -106,7 +107,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
|
|||
/* 0x0010 */
|
||||
.l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
|
||||
},
|
||||
{
|
||||
[I915_MOCS_PTE] = {
|
||||
/* 0x00000038 */
|
||||
.control_value = LE_CACHEABILITY(LE_PAGETABLE) |
|
||||
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
|
||||
|
@ -115,7 +116,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
|
|||
/* 0x0030 */
|
||||
.l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
|
||||
},
|
||||
{
|
||||
[I915_MOCS_CACHED] = {
|
||||
/* 0x0000003b */
|
||||
.control_value = LE_CACHEABILITY(LE_WB) |
|
||||
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
|
||||
|
@ -128,7 +129,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
|
|||
|
||||
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
|
||||
static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
|
||||
{
|
||||
[I915_MOCS_UNCACHED] = {
|
||||
/* 0x00000009 */
|
||||
.control_value = LE_CACHEABILITY(LE_UC) |
|
||||
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
|
||||
|
@ -138,7 +139,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
|
|||
/* 0x0010 */
|
||||
.l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
|
||||
},
|
||||
{
|
||||
[I915_MOCS_PTE] = {
|
||||
/* 0x00000038 */
|
||||
.control_value = LE_CACHEABILITY(LE_PAGETABLE) |
|
||||
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
|
||||
|
@ -148,7 +149,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
|
|||
/* 0x0030 */
|
||||
.l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
|
||||
},
|
||||
{
|
||||
[I915_MOCS_CACHED] = {
|
||||
/* 0x00000039 */
|
||||
.control_value = LE_CACHEABILITY(LE_UC) |
|
||||
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
|
||||
|
@ -203,9 +204,9 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
|
|||
return result;
|
||||
}
|
||||
|
||||
static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
|
||||
static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
|
||||
{
|
||||
switch (ring) {
|
||||
switch (engine_id) {
|
||||
case RCS:
|
||||
return GEN9_GFX_MOCS(index);
|
||||
case VCS:
|
||||
|
@ -217,7 +218,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
|
|||
case VCS2:
|
||||
return GEN9_MFX1_MOCS(index);
|
||||
default:
|
||||
MISSING_CASE(ring);
|
||||
MISSING_CASE(engine_id);
|
||||
return INVALID_MMIO_REG;
|
||||
}
|
||||
}
|
||||
|
@ -275,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
|
|||
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
|
||||
const struct drm_i915_mocs_table *table)
|
||||
{
|
||||
struct intel_ringbuffer *ringbuf = req->ringbuf;
|
||||
struct intel_ring *ring = req->ring;
|
||||
enum intel_engine_id engine = req->engine->id;
|
||||
unsigned int index;
|
||||
int ret;
|
||||
|
@ -287,14 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_logical_ring_emit(ringbuf,
|
||||
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
|
||||
|
||||
for (index = 0; index < table->size; index++) {
|
||||
intel_logical_ring_emit_reg(ringbuf,
|
||||
mocs_register(engine, index));
|
||||
intel_logical_ring_emit(ringbuf,
|
||||
table->table[index].control_value);
|
||||
intel_ring_emit_reg(ring, mocs_register(engine, index));
|
||||
intel_ring_emit(ring, table->table[index].control_value);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -306,14 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
|
|||
* that value to all the used entries.
|
||||
*/
|
||||
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
|
||||
intel_logical_ring_emit_reg(ringbuf,
|
||||
mocs_register(engine, index));
|
||||
intel_logical_ring_emit(ringbuf,
|
||||
table->table[0].control_value);
|
||||
intel_ring_emit_reg(ring, mocs_register(engine, index));
|
||||
intel_ring_emit(ring, table->table[0].control_value);
|
||||
}
|
||||
|
||||
intel_logical_ring_emit(ringbuf, MI_NOOP);
|
||||
intel_logical_ring_advance(ringbuf);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -340,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
|
|||
static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
|
||||
const struct drm_i915_mocs_table *table)
|
||||
{
|
||||
struct intel_ringbuffer *ringbuf = req->ringbuf;
|
||||
struct intel_ring *ring = req->ring;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
|
@ -351,19 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_logical_ring_emit(ringbuf,
|
||||
intel_ring_emit(ring,
|
||||
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
|
||||
|
||||
for (i = 0; i < table->size/2; i++) {
|
||||
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
|
||||
intel_logical_ring_emit(ringbuf,
|
||||
l3cc_combine(table, 2*i, 2*i+1));
|
||||
intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
|
||||
intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
|
||||
}
|
||||
|
||||
if (table->size & 0x01) {
|
||||
/* Odd table size - 1 left over */
|
||||
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
|
||||
intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
|
||||
intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
|
||||
intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
|
||||
i++;
|
||||
}
|
||||
|
||||
|
@ -373,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
|
|||
* they are reserved by the hardware.
|
||||
*/
|
||||
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
|
||||
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
|
||||
intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
|
||||
intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
|
||||
intel_ring_emit(ring, l3cc_combine(table, 0, 0));
|
||||
}
|
||||
|
||||
intel_logical_ring_emit(ringbuf, MI_NOOP);
|
||||
intel_logical_ring_advance(ringbuf);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -54,6 +54,6 @@
|
|||
|
||||
int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
|
||||
void intel_mocs_init_l3cc_table(struct drm_device *dev);
|
||||
int intel_mocs_init_engine(struct intel_engine_cs *ring);
|
||||
int intel_mocs_init_engine(struct intel_engine_cs *engine);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include "i915_drv.h"
|
||||
#include "i915_reg.h"
|
||||
#include "intel_drv.h"
|
||||
#include "intel_frontbuffer.h"
|
||||
|
||||
/* Limits for overlay size. According to intel doc, the real limits are:
|
||||
* Y width: 4095, UV width (planar): 2047, Y height: 2047,
|
||||
|
@ -183,8 +184,7 @@ struct intel_overlay {
|
|||
u32 flip_addr;
|
||||
struct drm_i915_gem_object *reg_bo;
|
||||
/* flip handling */
|
||||
struct drm_i915_gem_request *last_flip_req;
|
||||
void (*flip_tail)(struct intel_overlay *);
|
||||
struct i915_gem_active last_flip;
|
||||
};
|
||||
|
||||
static struct overlay_registers __iomem *
|
||||
|
@ -210,37 +210,46 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
|
|||
io_mapping_unmap(regs);
|
||||
}
|
||||
|
||||
static void intel_overlay_submit_request(struct intel_overlay *overlay,
|
||||
struct drm_i915_gem_request *req,
|
||||
i915_gem_retire_fn retire)
|
||||
{
|
||||
GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
|
||||
&overlay->i915->drm.struct_mutex));
|
||||
overlay->last_flip.retire = retire;
|
||||
i915_gem_active_set(&overlay->last_flip, req);
|
||||
i915_add_request(req);
|
||||
}
|
||||
|
||||
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
|
||||
struct drm_i915_gem_request *req,
|
||||
void (*tail)(struct intel_overlay *))
|
||||
i915_gem_retire_fn retire)
|
||||
{
|
||||
int ret;
|
||||
intel_overlay_submit_request(overlay, req, retire);
|
||||
return i915_gem_active_retire(&overlay->last_flip,
|
||||
&overlay->i915->drm.struct_mutex);
|
||||
}
|
||||
|
||||
WARN_ON(overlay->last_flip_req);
|
||||
i915_gem_request_assign(&overlay->last_flip_req, req);
|
||||
i915_add_request(req);
|
||||
static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = overlay->i915;
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
|
||||
overlay->flip_tail = tail;
|
||||
ret = i915_wait_request(overlay->last_flip_req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_request_assign(&overlay->last_flip_req, NULL);
|
||||
return 0;
|
||||
return i915_gem_request_alloc(engine, dev_priv->kernel_context);
|
||||
}
|
||||
|
||||
/* overlay needs to be disable in OCMD reg */
|
||||
static int intel_overlay_on(struct intel_overlay *overlay)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = overlay->i915;
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
struct drm_i915_gem_request *req;
|
||||
struct intel_ring *ring;
|
||||
int ret;
|
||||
|
||||
WARN_ON(overlay->active);
|
||||
WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
|
||||
|
||||
req = i915_gem_request_alloc(engine, NULL);
|
||||
req = alloc_request(overlay);
|
||||
if (IS_ERR(req))
|
||||
return PTR_ERR(req);
|
||||
|
||||
|
@ -252,11 +261,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
|
|||
|
||||
overlay->active = true;
|
||||
|
||||
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
|
||||
intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
|
||||
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_advance(engine);
|
||||
ring = req->ring;
|
||||
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
|
||||
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
|
||||
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return intel_overlay_do_wait_request(overlay, req, NULL);
|
||||
}
|
||||
|
@ -266,8 +276,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
|
|||
bool load_polyphase_filter)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = overlay->i915;
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
struct drm_i915_gem_request *req;
|
||||
struct intel_ring *ring;
|
||||
u32 flip_addr = overlay->flip_addr;
|
||||
u32 tmp;
|
||||
int ret;
|
||||
|
@ -282,7 +292,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
|
|||
if (tmp & (1 << 17))
|
||||
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
|
||||
|
||||
req = i915_gem_request_alloc(engine, NULL);
|
||||
req = alloc_request(overlay);
|
||||
if (IS_ERR(req))
|
||||
return PTR_ERR(req);
|
||||
|
||||
|
@ -292,29 +302,37 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
|
|||
return ret;
|
||||
}
|
||||
|
||||
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
|
||||
intel_ring_emit(engine, flip_addr);
|
||||
intel_ring_advance(engine);
|
||||
ring = req->ring;
|
||||
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
|
||||
intel_ring_emit(ring, flip_addr);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
WARN_ON(overlay->last_flip_req);
|
||||
i915_gem_request_assign(&overlay->last_flip_req, req);
|
||||
i915_add_request(req);
|
||||
intel_overlay_submit_request(overlay, req, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
|
||||
static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_overlay *overlay =
|
||||
container_of(active, typeof(*overlay), last_flip);
|
||||
struct drm_i915_gem_object *obj = overlay->old_vid_bo;
|
||||
|
||||
i915_gem_track_fb(obj, NULL,
|
||||
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
|
||||
|
||||
i915_gem_object_ggtt_unpin(obj);
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
|
||||
overlay->old_vid_bo = NULL;
|
||||
}
|
||||
|
||||
static void intel_overlay_off_tail(struct intel_overlay *overlay)
|
||||
static void intel_overlay_off_tail(struct i915_gem_active *active,
|
||||
struct drm_i915_gem_request *req)
|
||||
{
|
||||
struct intel_overlay *overlay =
|
||||
container_of(active, typeof(*overlay), last_flip);
|
||||
struct drm_i915_gem_object *obj = overlay->vid_bo;
|
||||
|
||||
/* never have the overlay hw on without showing a frame */
|
||||
|
@ -322,7 +340,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
|
|||
return;
|
||||
|
||||
i915_gem_object_ggtt_unpin(obj);
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
i915_gem_object_put(obj);
|
||||
overlay->vid_bo = NULL;
|
||||
|
||||
overlay->crtc->overlay = NULL;
|
||||
|
@ -334,8 +352,8 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
|
|||
static int intel_overlay_off(struct intel_overlay *overlay)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = overlay->i915;
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
struct drm_i915_gem_request *req;
|
||||
struct intel_ring *ring;
|
||||
u32 flip_addr = overlay->flip_addr;
|
||||
int ret;
|
||||
|
||||
|
@ -347,7 +365,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
|
|||
* of the hw. Do it in both cases */
|
||||
flip_addr |= OFC_UPDATE;
|
||||
|
||||
req = i915_gem_request_alloc(engine, NULL);
|
||||
req = alloc_request(overlay);
|
||||
if (IS_ERR(req))
|
||||
return PTR_ERR(req);
|
||||
|
||||
|
@ -357,46 +375,36 @@ static int intel_overlay_off(struct intel_overlay *overlay)
|
|||
return ret;
|
||||
}
|
||||
|
||||
ring = req->ring;
|
||||
/* wait for overlay to go idle */
|
||||
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
|
||||
intel_ring_emit(engine, flip_addr);
|
||||
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
|
||||
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
|
||||
intel_ring_emit(ring, flip_addr);
|
||||
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
|
||||
/* turn overlay off */
|
||||
if (IS_I830(dev_priv)) {
|
||||
/* Workaround: Don't disable the overlay fully, since otherwise
|
||||
* it dies on the next OVERLAY_ON cmd. */
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
} else {
|
||||
intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
|
||||
intel_ring_emit(engine, flip_addr);
|
||||
intel_ring_emit(engine,
|
||||
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
|
||||
intel_ring_emit(ring, flip_addr);
|
||||
intel_ring_emit(ring,
|
||||
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
|
||||
}
|
||||
intel_ring_advance(engine);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
|
||||
return intel_overlay_do_wait_request(overlay, req,
|
||||
intel_overlay_off_tail);
|
||||
}
|
||||
|
||||
/* recover from an interruption due to a signal
 * We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
	int ret;

	if (overlay->last_flip_req == NULL)
		return 0;

	ret = i915_wait_request(overlay->last_flip_req);
	if (ret)
		return ret;

	if (overlay->flip_tail)
		overlay->flip_tail(overlay);

	i915_gem_request_assign(&overlay->last_flip_req, NULL);
	return 0;
	return i915_gem_active_retire(&overlay->last_flip,
				      &overlay->i915->drm.struct_mutex);
}
|
||||
|
||||
/* Wait for pending overlay flip and release old frame.
|
||||
|
@ -406,7 +414,6 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
|
|||
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = overlay->i915;
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
|
@ -420,8 +427,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
|
|||
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
|
||||
/* synchronous slowpath */
|
||||
struct drm_i915_gem_request *req;
|
||||
struct intel_ring *ring;
|
||||
|
||||
req = i915_gem_request_alloc(engine, NULL);
|
||||
req = alloc_request(overlay);
|
||||
if (IS_ERR(req))
|
||||
return PTR_ERR(req);
|
||||
|
||||
|
@ -431,22 +439,19 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
|
|||
return ret;
|
||||
}
|
||||
|
||||
intel_ring_emit(engine,
|
||||
ring = req->ring;
|
||||
intel_ring_emit(ring,
|
||||
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
|
||||
intel_ring_emit(engine, MI_NOOP);
|
||||
intel_ring_advance(engine);
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_advance(ring);
|
||||
|
||||
ret = intel_overlay_do_wait_request(overlay, req,
|
||||
intel_overlay_release_old_vid_tail);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
} else
|
||||
intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
|
||||
|
||||
intel_overlay_release_old_vid_tail(overlay);
|
||||
|
||||
|
||||
i915_gem_track_fb(overlay->old_vid_bo, NULL,
|
||||
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -459,7 +464,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv)
|
|||
|
||||
intel_overlay_release_old_vid(overlay);
|
||||
|
||||
overlay->last_flip_req = NULL;
|
||||
overlay->old_xscale = 0;
|
||||
overlay->old_yscale = 0;
|
||||
overlay->crtc = NULL;
|
||||
|
@ -836,8 +840,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
|
|||
overlay->old_vid_bo = overlay->vid_bo;
|
||||
overlay->vid_bo = new_bo;
|
||||
|
||||
intel_frontbuffer_flip(&dev_priv->drm,
|
||||
INTEL_FRONTBUFFER_OVERLAY(pipe));
|
||||
intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe));
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -870,12 +873,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
|
|||
iowrite32(0, ®s->OCMD);
|
||||
intel_overlay_unmap_regs(overlay, regs);
|
||||
|
||||
ret = intel_overlay_off(overlay);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
|
||||
intel_overlay_off_tail(overlay);
|
||||
return 0;
|
||||
return intel_overlay_off(overlay);
|
||||
}
|
||||
|
||||
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
|
||||
|
@ -1122,9 +1120,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
|
|||
}
|
||||
crtc = to_intel_crtc(drmmode_crtc);
|
||||
|
||||
new_bo = to_intel_bo(drm_gem_object_lookup(file_priv,
|
||||
put_image_rec->bo_handle));
|
||||
if (&new_bo->base == NULL) {
|
||||
new_bo = i915_gem_object_lookup(file_priv, put_image_rec->bo_handle);
|
||||
if (!new_bo) {
|
||||
ret = -ENOENT;
|
||||
goto out_free;
|
||||
}
|
||||
|
@ -1132,7 +1129,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
|
|||
drm_modeset_lock_all(dev);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
if (new_bo->tiling_mode) {
|
||||
if (i915_gem_object_is_tiled(new_bo)) {
|
||||
DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
|
@ -1220,7 +1217,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
|
|||
out_unlock:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
drm_modeset_unlock_all(dev);
|
||||
drm_gem_object_unreference_unlocked(&new_bo->base);
|
||||
i915_gem_object_put_unlocked(new_bo);
|
||||
out_free:
|
||||
kfree(params);
|
||||
|
||||
|
@ -1404,7 +1401,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
|
|||
}
|
||||
overlay->flip_addr = reg_bo->phys_handle->busaddr;
|
||||
} else {
|
||||
ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
|
||||
ret = i915_gem_object_ggtt_pin(reg_bo, NULL,
|
||||
0, PAGE_SIZE, PIN_MAPPABLE);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to pin overlay register bo\n");
|
||||
goto out_free_bo;
|
||||
|
@ -1444,7 +1442,7 @@ out_unpin_bo:
|
|||
if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
|
||||
i915_gem_object_ggtt_unpin(reg_bo);
|
||||
out_free_bo:
|
||||
drm_gem_object_unreference(®_bo->base);
|
||||
i915_gem_object_put(reg_bo);
|
||||
out_free:
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
kfree(overlay);
|
||||
|
@ -1461,7 +1459,7 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
|
|||
* hardware should be off already */
|
||||
WARN_ON(dev_priv->overlay->active);
|
||||
|
||||
drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
|
||||
i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
|
||||
kfree(dev_priv->overlay);
|
||||
}
|
||||
|
||||
|
|
|
@ -340,6 +340,11 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
|
|||
I915_WRITE(FW_BLC_SELF, val);
|
||||
POSTING_READ(FW_BLC_SELF);
|
||||
} else if (IS_I915GM(dev)) {
|
||||
/*
|
||||
 * FIXME can't find a bit like this for 915G, and
 * yet it does have the related watermark in
|
||||
* FW_BLC_SELF. What's going on?
|
||||
*/
|
||||
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
|
||||
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
|
||||
I915_WRITE(INSTPM, val);
|
||||
|
@ -1580,7 +1585,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
|
|||
obj = intel_fb_obj(enabled->primary->state->fb);
|
||||
|
||||
/* self-refresh seems busted with untiled */
|
||||
if (obj->tiling_mode == I915_TILING_NONE)
|
||||
if (!i915_gem_object_is_tiled(obj))
|
||||
enabled = NULL;
|
||||
}
|
||||
|
||||
|
@ -1604,6 +1609,9 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
|
|||
unsigned long line_time_us;
|
||||
int entries;
|
||||
|
||||
if (IS_I915GM(dev) || IS_I945GM(dev))
|
||||
cpp = 4;
|
||||
|
||||
line_time_us = max(htotal * 1000 / clock, 1);
|
||||
|
||||
/* Use ns/us then divide to preserve precision */
|
||||
|
@ -1618,7 +1626,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
|
|||
if (IS_I945G(dev) || IS_I945GM(dev))
|
||||
I915_WRITE(FW_BLC_SELF,
|
||||
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
|
||||
else if (IS_I915GM(dev))
|
||||
else
|
||||
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
|
||||
}
|
||||
|
||||
|
@ -3344,6 +3352,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
|
|||
plane_bytes_per_line *= 4;
|
||||
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
|
||||
plane_blocks_per_line /= 4;
|
||||
} else if (tiling == DRM_FORMAT_MOD_NONE) {
|
||||
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
|
||||
} else {
|
||||
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
|
||||
}
|
||||
|
@ -4912,7 +4922,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
|
|||
*/
|
||||
if (!(dev_priv->gt.awake &&
|
||||
dev_priv->rps.enabled &&
|
||||
dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
|
||||
dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
|
||||
return;
|
||||
|
||||
/* Force a RPS boost (and don't count it against the client) if
|
||||
|
@ -5103,35 +5113,31 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
|
|||
|
||||
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
uint32_t rp_state_cap;
|
||||
u32 ddcc_status = 0;
|
||||
int ret;
|
||||
|
||||
/* All of these values are in units of 50MHz */
|
||||
dev_priv->rps.cur_freq = 0;
|
||||
|
||||
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
|
||||
if (IS_BROXTON(dev_priv)) {
|
||||
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
|
||||
u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
|
||||
dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
|
||||
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
|
||||
dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
|
||||
} else {
|
||||
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
|
||||
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
|
||||
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
|
||||
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
|
||||
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
|
||||
}
|
||||
|
||||
/* hw_max = RP0 until we check for overclocking */
|
||||
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
|
||||
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
|
||||
|
||||
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
|
||||
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
|
||||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
|
||||
ret = sandybridge_pcode_read(dev_priv,
|
||||
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
|
||||
&ddcc_status);
|
||||
if (0 == ret)
|
||||
u32 ddcc_status = 0;
|
||||
|
||||
if (sandybridge_pcode_read(dev_priv,
|
||||
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
|
||||
&ddcc_status) == 0)
|
||||
dev_priv->rps.efficient_freq =
|
||||
clamp_t(u8,
|
||||
((ddcc_status >> 8) & 0xff),
|
||||
|
@ -5141,29 +5147,26 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
|
|||
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
|
||||
/* Store the frequency values in 16.66 MHZ units, which is
|
||||
the natural hardware unit for SKL */
|
||||
* the natural hardware unit for SKL
|
||||
*/
|
||||
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
|
||||
dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
|
||||
dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
|
||||
dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
|
||||
dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
|
||||
}
|
||||
}
|
||||
|
||||
dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
|
||||
static void reset_rps(struct drm_i915_private *dev_priv,
|
||||
void (*set)(struct drm_i915_private *, u8))
|
||||
{
|
||||
u8 freq = dev_priv->rps.cur_freq;
|
||||
|
||||
/* Preserve min/max settings in case of re-init */
|
||||
if (dev_priv->rps.max_freq_softlimit == 0)
|
||||
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
|
||||
/* force a reset */
|
||||
dev_priv->rps.power = -1;
|
||||
dev_priv->rps.cur_freq = -1;
|
||||
|
||||
if (dev_priv->rps.min_freq_softlimit == 0) {
|
||||
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
||||
dev_priv->rps.min_freq_softlimit =
|
||||
max_t(int, dev_priv->rps.efficient_freq,
|
||||
intel_freq_opcode(dev_priv, 450));
|
||||
else
|
||||
dev_priv->rps.min_freq_softlimit =
|
||||
dev_priv->rps.min_freq;
|
||||
}
|
||||
set(dev_priv, freq);
|
||||
}
|
||||
|
||||
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
|
||||
|
@ -5171,8 +5174,6 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
|
|||
{
|
||||
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||
|
||||
gen6_init_rps_frequencies(dev_priv);
|
||||
|
||||
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
|
||||
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
|
||||
/*
|
||||
|
@ -5202,8 +5203,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
|
|||
/* Leaning on the below call to gen6_set_rps to program/setup the
|
||||
* Up/Down EI & threshold registers, as well as the RP_CONTROL,
|
||||
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
|
||||
dev_priv->rps.power = HIGH_POWER; /* force a reset */
|
||||
gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
|
||||
reset_rps(dev_priv, gen6_set_rps);
|
||||
|
||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
}
|
||||
|
@ -5290,9 +5290,6 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
|
|||
/* 2a: Disable RC states. */
|
||||
I915_WRITE(GEN6_RC_CONTROL, 0);
|
||||
|
||||
/* Initialize rps frequencies */
|
||||
gen6_init_rps_frequencies(dev_priv);
|
||||
|
||||
/* 2b: Program RC6 thresholds.*/
|
||||
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
|
||||
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
|
||||
|
@ -5349,8 +5346,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
|
|||
|
||||
/* 6: Ring frequency + overclocking (our driver does this later */
|
||||
|
||||
dev_priv->rps.power = HIGH_POWER; /* force a reset */
|
||||
gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
|
||||
reset_rps(dev_priv, gen6_set_rps);
|
||||
|
||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
}
|
||||
|
@ -5358,7 +5354,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
|
|||
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
|
||||
u32 rc6vids, rc6_mask = 0;
|
||||
u32 gtfifodbg;
|
||||
int rc6_mode;
|
||||
int ret;
|
||||
|
@ -5382,9 +5378,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
|
|||
|
||||
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||
|
||||
/* Initialize rps frequencies */
|
||||
gen6_init_rps_frequencies(dev_priv);
|
||||
|
||||
/* disable the counters and set deterministic thresholds */
|
||||
I915_WRITE(GEN6_RC_CONTROL, 0);
|
||||
|
||||
|
@ -5435,16 +5428,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
|
|||
if (ret)
|
||||
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
|
||||
|
||||
ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
|
||||
if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
|
||||
DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
|
||||
(dev_priv->rps.max_freq_softlimit & 0xff) * 50,
|
||||
(pcu_mbox & 0xff) * 50);
|
||||
dev_priv->rps.max_freq = pcu_mbox & 0xff;
|
||||
}
|
||||
|
||||
dev_priv->rps.power = HIGH_POWER; /* force a reset */
|
||||
gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
|
||||
reset_rps(dev_priv, gen6_set_rps);
|
||||
|
||||
rc6vids = 0;
|
||||
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
|
||||
|
@ -5463,7 +5447,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
|
|||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
}
|
||||
|
||||
static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
|
||||
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int min_freq = 15;
|
||||
unsigned int gpu_freq;
|
||||
|
@ -5547,16 +5531,6 @@ static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
|
|||
}
|
||||
}
|
||||
|
||||
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (!HAS_CORE_RING_FREQ(dev_priv))
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
__gen6_update_ring_freq(dev_priv);
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
}
|
||||
|
||||
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
u32 val, rp0;
|
||||
|
@ -5746,7 +5720,7 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
|
|||
if (WARN_ON(!dev_priv->vlv_pctx))
|
||||
return;
|
||||
|
||||
drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
|
||||
i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
|
||||
dev_priv->vlv_pctx = NULL;
|
||||
}
|
||||
|
||||
|
@ -5769,8 +5743,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
|
|||
|
||||
vlv_init_gpll_ref_freq(dev_priv);
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
|
||||
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
|
||||
switch ((val >> 6) & 3) {
|
||||
case 0:
|
||||
|
@ -5806,17 +5778,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
|
|||
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
|
||||
dev_priv->rps.min_freq);
|
||||
|
||||
dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
|
||||
|
||||
/* Preserve min/max settings in case of re-init */
|
||||
if (dev_priv->rps.max_freq_softlimit == 0)
|
||||
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
|
||||
|
||||
if (dev_priv->rps.min_freq_softlimit == 0)
|
||||
dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
|
||||
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
}
|
||||
|
||||
static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
|
@ -5827,8 +5788,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
|
|||
|
||||
vlv_init_gpll_ref_freq(dev_priv);
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
|
||||
mutex_lock(&dev_priv->sb_lock);
|
||||
val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
|
||||
mutex_unlock(&dev_priv->sb_lock);
|
||||
|
@ -5870,17 +5829,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
|
|||
dev_priv->rps.rp1_freq |
|
||||
dev_priv->rps.min_freq) & 1,
|
||||
"Odd GPU freq values\n");
|
||||
|
||||
dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
|
||||
|
||||
/* Preserve min/max settings in case of re-init */
|
||||
if (dev_priv->rps.max_freq_softlimit == 0)
|
||||
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
|
||||
|
||||
if (dev_priv->rps.min_freq_softlimit == 0)
|
||||
dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
|
||||
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
}
|
||||
|
||||
static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
|
@ -5971,16 +5919,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
|
|||
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
|
||||
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
|
||||
|
||||
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
|
||||
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
|
||||
dev_priv->rps.cur_freq);
|
||||
|
||||
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
|
||||
dev_priv->rps.idle_freq);
|
||||
|
||||
valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
|
||||
reset_rps(dev_priv, valleyview_set_rps);
|
||||
|
||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
}
|
||||
|
@ -6060,16 +5999,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
|
|||
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
|
||||
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
|
||||
|
||||
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
|
||||
DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
|
||||
dev_priv->rps.cur_freq);
|
||||
|
||||
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
|
||||
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
|
||||
dev_priv->rps.idle_freq);
|
||||
|
||||
valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
|
||||
reset_rps(dev_priv, valleyview_set_rps);
|
||||
|
||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
}
|
||||
|
@ -6398,19 +6328,11 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
|
|||
*/
|
||||
bool i915_gpu_busy(void)
|
||||
{
|
||||
struct drm_i915_private *dev_priv;
|
||||
struct intel_engine_cs *engine;
|
||||
bool ret = false;
|
||||
|
||||
spin_lock_irq(&mchdev_lock);
|
||||
if (!i915_mch_dev)
|
||||
goto out_unlock;
|
||||
dev_priv = i915_mch_dev;
|
||||
|
||||
for_each_engine(engine, dev_priv)
|
||||
ret |= !list_empty(&engine->request_list);
|
||||
|
||||
out_unlock:
|
||||
if (i915_mch_dev)
|
||||
ret = i915_mch_dev->gt.awake;
|
||||
spin_unlock_irq(&mchdev_lock);
|
||||
|
||||
return ret;
|
||||
|
@ -6566,30 +6488,60 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
|
|||
intel_runtime_pm_get(dev_priv);
|
||||
}
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
|
||||
/* Initialize RPS limits (for userspace) */
|
||||
if (IS_CHERRYVIEW(dev_priv))
|
||||
cherryview_init_gt_powersave(dev_priv);
|
||||
else if (IS_VALLEYVIEW(dev_priv))
|
||||
valleyview_init_gt_powersave(dev_priv);
|
||||
else if (INTEL_GEN(dev_priv) >= 6)
|
||||
gen6_init_rps_frequencies(dev_priv);
|
||||
|
||||
/* Derive initial user preferences/limits from the hardware limits */
|
||||
dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
|
||||
dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
|
||||
|
||||
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
|
||||
dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
|
||||
|
||||
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
|
||||
dev_priv->rps.min_freq_softlimit =
|
||||
max_t(int,
|
||||
dev_priv->rps.efficient_freq,
|
||||
intel_freq_opcode(dev_priv, 450));
|
||||
|
||||
/* After setting max-softlimit, find the overclock max freq */
|
||||
if (IS_GEN6(dev_priv) ||
|
||||
IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
|
||||
u32 params = 0;
|
||||
|
||||
sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, ¶ms);
|
||||
if (params & BIT(31)) { /* OC supported */
|
||||
DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
|
||||
(dev_priv->rps.max_freq & 0xff) * 50,
|
||||
(params & 0xff) * 50);
|
||||
dev_priv->rps.max_freq = params & 0xff;
|
||||
}
|
||||
}
|
||||
|
||||
/* Finally allow us to boost to max by default */
|
||||
dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
|
||||
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
|
||||
intel_autoenable_gt_powersave(dev_priv);
|
||||
}
|
||||
|
||||
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (IS_CHERRYVIEW(dev_priv))
|
||||
return;
|
||||
else if (IS_VALLEYVIEW(dev_priv))
|
||||
if (IS_VALLEYVIEW(dev_priv))
|
||||
valleyview_cleanup_gt_powersave(dev_priv);
|
||||
|
||||
if (!i915.enable_rc6)
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
}
|
||||
|
||||
static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
|
||||
|
||||
gen6_disable_rps_interrupts(dev_priv);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_suspend_gt_powersave - suspend PM work and helper threads
|
||||
* @dev_priv: i915 device
|
||||
|
@ -6603,60 +6555,76 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
|
|||
if (INTEL_GEN(dev_priv) < 6)
|
||||
return;
|
||||
|
||||
gen6_suspend_rps(dev_priv);
|
||||
if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
|
||||
/* Force GPU to min freq during suspend */
|
||||
gen6_rps_idle(dev_priv);
|
||||
/* gen6_rps_idle() will be called later to disable interrupts */
|
||||
}
|
||||
|
||||
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
dev_priv->rps.enabled = true; /* force disabling */
|
||||
intel_disable_gt_powersave(dev_priv);
|
||||
|
||||
gen6_reset_rps_interrupts(dev_priv);
|
||||
}
|
||||
|
||||
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (IS_IRONLAKE_M(dev_priv)) {
|
||||
ironlake_disable_drps(dev_priv);
|
||||
} else if (INTEL_INFO(dev_priv)->gen >= 6) {
|
||||
intel_suspend_gt_powersave(dev_priv);
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
if (INTEL_INFO(dev_priv)->gen >= 9) {
|
||||
gen9_disable_rc6(dev_priv);
|
||||
gen9_disable_rps(dev_priv);
|
||||
} else if (IS_CHERRYVIEW(dev_priv))
|
||||
cherryview_disable_rps(dev_priv);
|
||||
else if (IS_VALLEYVIEW(dev_priv))
|
||||
valleyview_disable_rps(dev_priv);
|
||||
else
|
||||
gen6_disable_rps(dev_priv);
|
||||
|
||||
dev_priv->rps.enabled = false;
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_gen6_powersave_work(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, struct drm_i915_private,
|
||||
rps.delayed_resume_work.work);
|
||||
if (!READ_ONCE(dev_priv->rps.enabled))
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
|
||||
gen6_reset_rps_interrupts(dev_priv);
|
||||
if (INTEL_GEN(dev_priv) >= 9) {
|
||||
gen9_disable_rc6(dev_priv);
|
||||
gen9_disable_rps(dev_priv);
|
||||
} else if (IS_CHERRYVIEW(dev_priv)) {
|
||||
cherryview_disable_rps(dev_priv);
|
||||
} else if (IS_VALLEYVIEW(dev_priv)) {
|
||||
valleyview_disable_rps(dev_priv);
|
||||
} else if (INTEL_GEN(dev_priv) >= 6) {
|
||||
gen6_disable_rps(dev_priv);
|
||||
} else if (IS_IRONLAKE_M(dev_priv)) {
|
||||
ironlake_disable_drps(dev_priv);
|
||||
}
|
||||
|
||||
dev_priv->rps.enabled = false;
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
}
|
||||
|
||||
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
/* We shouldn't be disabling as we submit, so this should be less
|
||||
* racy than it appears!
|
||||
*/
|
||||
if (READ_ONCE(dev_priv->rps.enabled))
|
||||
return;
|
||||
|
||||
/* Powersaving is controlled by the host when inside a VM */
|
||||
if (intel_vgpu_active(dev_priv))
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
|
||||
if (IS_CHERRYVIEW(dev_priv)) {
|
||||
cherryview_enable_rps(dev_priv);
|
||||
} else if (IS_VALLEYVIEW(dev_priv)) {
|
||||
valleyview_enable_rps(dev_priv);
|
||||
} else if (INTEL_INFO(dev_priv)->gen >= 9) {
|
||||
} else if (INTEL_GEN(dev_priv) >= 9) {
|
||||
gen9_enable_rc6(dev_priv);
|
||||
gen9_enable_rps(dev_priv);
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
__gen6_update_ring_freq(dev_priv);
|
||||
gen6_update_ring_freq(dev_priv);
|
||||
} else if (IS_BROADWELL(dev_priv)) {
|
||||
gen8_enable_rps(dev_priv);
|
||||
__gen6_update_ring_freq(dev_priv);
|
||||
} else {
|
||||
gen6_update_ring_freq(dev_priv);
|
||||
} else if (INTEL_GEN(dev_priv) >= 6) {
|
||||
gen6_enable_rps(dev_priv);
|
||||
__gen6_update_ring_freq(dev_priv);
|
||||
gen6_update_ring_freq(dev_priv);
|
||||
} else if (IS_IRONLAKE_M(dev_priv)) {
|
||||
ironlake_enable_drps(dev_priv);
|
||||
intel_init_emon(dev_priv);
|
||||
}
|
||||
|
||||
WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
|
||||
|
@ -6666,18 +6634,47 @@ static void intel_gen6_powersave_work(struct work_struct *work)
|
|||
WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
|
||||
|
||||
dev_priv->rps.enabled = true;
|
||||
|
||||
gen6_enable_rps_interrupts(dev_priv);
|
||||
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
}
|
||||
|
||||
static void __intel_autoenable_gt_powersave(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
|
||||
struct intel_engine_cs *rcs;
|
||||
struct drm_i915_gem_request *req;
|
||||
|
||||
if (READ_ONCE(dev_priv->rps.enabled))
|
||||
goto out;
|
||||
|
||||
rcs = &dev_priv->engine[RCS];
|
||||
if (rcs->last_context)
|
||||
goto out;
|
||||
|
||||
if (!rcs->init_context)
|
||||
goto out;
|
||||
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
|
||||
if (IS_ERR(req))
|
||||
goto unlock;
|
||||
|
||||
if (!i915.enable_execlists && i915_switch_context(req) == 0)
|
||||
rcs->init_context(req);
|
||||
|
||||
/* Mark the device busy, calling intel_enable_gt_powersave() */
|
||||
i915_add_request_no_flush(req);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
out:
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
}
|
||||
|
||||
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
/* Powersaving is controlled by the host when inside a VM */
|
||||
if (intel_vgpu_active(dev_priv))
|
||||
if (READ_ONCE(dev_priv->rps.enabled))
|
||||
return;
|
||||
|
||||
if (IS_IRONLAKE_M(dev_priv)) {
|
||||
|
@ -6698,21 +6695,13 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
|
|||
* paths, so the _noresume version is enough (and in case of
|
||||
* runtime resume it's necessary).
|
||||
*/
|
||||
if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
|
||||
round_jiffies_up_relative(HZ)))
|
||||
if (queue_delayed_work(dev_priv->wq,
|
||||
&dev_priv->rps.autoenable_work,
|
||||
round_jiffies_up_relative(HZ)))
|
||||
intel_runtime_pm_get_noresume(dev_priv);
|
||||
}
|
||||
}
|
||||
|
||||
void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (INTEL_INFO(dev_priv)->gen < 6)
|
||||
return;
|
||||
|
||||
gen6_suspend_rps(dev_priv);
|
||||
dev_priv->rps.enabled = false;
|
||||
}
|
||||
|
||||
static void ibx_init_clock_gating(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
|
@ -7787,7 +7776,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
|
|||
if (!i915_gem_request_completed(req))
|
||||
gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
|
||||
|
||||
i915_gem_request_unreference(req);
|
||||
i915_gem_request_put(req);
|
||||
kfree(boost);
|
||||
}
|
||||
|
||||
|
@ -7805,8 +7794,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
|
|||
if (boost == NULL)
|
||||
return;
|
||||
|
||||
i915_gem_request_reference(req);
|
||||
boost->req = req;
|
||||
boost->req = i915_gem_request_get(req);
|
||||
|
||||
INIT_WORK(&boost->work, __intel_rps_boost_work);
|
||||
queue_work(req->i915->wq, &boost->work);
|
||||
|
@ -7819,11 +7807,9 @@ void intel_pm_setup(struct drm_device *dev)
|
|||
mutex_init(&dev_priv->rps.hw_lock);
|
||||
spin_lock_init(&dev_priv->rps.client_lock);
|
||||
|
||||
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
|
||||
intel_gen6_powersave_work);
|
||||
INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
|
||||
__intel_autoenable_gt_powersave);
|
||||
INIT_LIST_HEAD(&dev_priv->rps.clients);
|
||||
INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
|
||||
INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
|
||||
|
||||
dev_priv->pm.suspended = false;
|
||||
atomic_set(&dev_priv->pm.wakeref_count, 0);
|
||||
|
|
|
@ -645,9 +645,8 @@ unlock:
|
|||
mutex_unlock(&dev_priv->psr.lock);
|
||||
}
|
||||
|
||||
static void intel_psr_exit(struct drm_device *dev)
|
||||
static void intel_psr_exit(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_dp *intel_dp = dev_priv->psr.enabled;
|
||||
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
|
||||
enum pipe pipe = to_intel_crtc(crtc)->pipe;
|
||||
|
@ -656,7 +655,7 @@ static void intel_psr_exit(struct drm_device *dev)
|
|||
if (!dev_priv->psr.active)
|
||||
return;
|
||||
|
||||
if (HAS_DDI(dev)) {
|
||||
if (HAS_DDI(dev_priv)) {
|
||||
val = I915_READ(EDP_PSR_CTL);
|
||||
|
||||
WARN_ON(!(val & EDP_PSR_ENABLE));
|
||||
|
@ -691,7 +690,7 @@ static void intel_psr_exit(struct drm_device *dev)
|
|||
|
||||
/**
|
||||
* intel_psr_single_frame_update - Single Frame Update
|
||||
* @dev: DRM device
|
||||
* @dev_priv: i915 device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* Some platforms support a single frame update feature that is used to
|
||||
|
@ -699,10 +698,9 @@ static void intel_psr_exit(struct drm_device *dev)
|
|||
* So far it is only implemented for Valleyview and Cherryview because
|
||||
* hardware requires this to be done before a page flip.
|
||||
*/
|
||||
void intel_psr_single_frame_update(struct drm_device *dev,
|
||||
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_crtc *crtc;
|
||||
enum pipe pipe;
|
||||
u32 val;
|
||||
|
@ -711,7 +709,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
|
|||
* Single frame update is already supported on BDW+ but it requires
|
||||
* many W/A and it isn't really needed.
|
||||
*/
|
||||
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
|
||||
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->psr.lock);
|
||||
|
@ -737,7 +735,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
|
|||
|
||||
/**
|
||||
 * intel_psr_invalidate - Invalidate PSR
|
||||
* @dev: DRM device
|
||||
* @dev_priv: i915 device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
*
|
||||
* Since the hardware frontbuffer tracking has gaps we need to integrate
|
||||
|
@ -747,10 +745,9 @@ void intel_psr_single_frame_update(struct drm_device *dev,
|
|||
*
|
||||
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
|
||||
*/
|
||||
void intel_psr_invalidate(struct drm_device *dev,
|
||||
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_crtc *crtc;
|
||||
enum pipe pipe;
|
||||
|
||||
|
@ -767,14 +764,14 @@ void intel_psr_invalidate(struct drm_device *dev,
|
|||
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
|
||||
|
||||
if (frontbuffer_bits)
|
||||
intel_psr_exit(dev);
|
||||
intel_psr_exit(dev_priv);
|
||||
|
||||
mutex_unlock(&dev_priv->psr.lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_psr_flush - Flush PSR
|
||||
* @dev: DRM device
|
||||
* @dev_priv: i915 device
|
||||
* @frontbuffer_bits: frontbuffer plane tracking bits
|
||||
* @origin: which operation caused the flush
|
||||
*
|
||||
|
@ -785,10 +782,9 @@ void intel_psr_invalidate(struct drm_device *dev,
|
|||
*
|
||||
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
|
||||
*/
|
||||
void intel_psr_flush(struct drm_device *dev,
|
||||
void intel_psr_flush(struct drm_i915_private *dev_priv,
|
||||
unsigned frontbuffer_bits, enum fb_op_origin origin)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_crtc *crtc;
|
||||
enum pipe pipe;
|
||||
|
||||
|
@ -806,7 +802,7 @@ void intel_psr_flush(struct drm_device *dev,
|
|||
|
||||
/* By definition flush = invalidate + flush */
|
||||
if (frontbuffer_bits)
|
||||
intel_psr_exit(dev);
|
||||
intel_psr_exit(dev_priv);
|
||||
|
||||
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
|
||||
if (!work_busy(&dev_priv->psr.work.work))
|
||||
|
|
|
@@ -24,12 +24,13 @@
#ifndef _INTEL_RENDERSTATE_H
#define _INTEL_RENDERSTATE_H

#include "i915_drv.h"
#include <linux/types.h>

extern const struct intel_renderstate_rodata gen6_null_state;
extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
extern const struct intel_renderstate_rodata gen9_null_state;
struct intel_renderstate_rodata {
	const u32 *reloc;
	const u32 *batch;
	const u32 batch_items;
};

#define RO_RENDERSTATE(_g) \
	const struct intel_renderstate_rodata gen ## _g ## _null_state = { \

@@ -38,4 +39,9 @@ extern const struct intel_renderstate_rodata gen9_null_state;
		.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
	}

extern const struct intel_renderstate_rodata gen6_null_state;
extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
extern const struct intel_renderstate_rodata gen9_null_state;

#endif /* INTEL_RENDERSTATE_H */
|
||||
|
|
File diff suppressed because it is too large
|
@ -3,6 +3,7 @@
|
|||
|
||||
#include <linux/hashtable.h>
|
||||
#include "i915_gem_batch_pool.h"
|
||||
#include "i915_gem_request.h"
|
||||
|
||||
#define I915_CMD_HASH_ORDER 9
|
||||
|
||||
|
@ -31,23 +32,23 @@ struct intel_hw_status_page {
|
|||
struct drm_i915_gem_object *obj;
|
||||
};
|
||||
|
||||
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
|
||||
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
|
||||
#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
|
||||
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
|
||||
|
||||
#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
|
||||
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
|
||||
#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
|
||||
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
|
||||
|
||||
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
|
||||
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
|
||||
#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
|
||||
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
|
||||
|
||||
#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
|
||||
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
|
||||
#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
|
||||
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
|
||||
|
||||
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
|
||||
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
|
||||
#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
|
||||
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
|
||||
|
||||
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
|
||||
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
|
||||
#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
|
||||
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
|
||||
|
||||
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
|
||||
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
|
||||
|
@ -62,7 +63,7 @@ struct intel_hw_status_page {
|
|||
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
|
||||
GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
|
||||
|
||||
enum intel_ring_hangcheck_action {
|
||||
enum intel_engine_hangcheck_action {
|
||||
HANGCHECK_IDLE = 0,
|
||||
HANGCHECK_WAIT,
|
||||
HANGCHECK_ACTIVE,
|
||||
|
@ -72,24 +73,26 @@ enum intel_ring_hangcheck_action {
|
|||
|
||||
#define HANGCHECK_SCORE_RING_HUNG 31
|
||||
|
||||
struct intel_ring_hangcheck {
|
||||
struct intel_engine_hangcheck {
|
||||
u64 acthd;
|
||||
unsigned long user_interrupts;
|
||||
u32 seqno;
|
||||
int score;
|
||||
enum intel_ring_hangcheck_action action;
|
||||
enum intel_engine_hangcheck_action action;
|
||||
int deadlock;
|
||||
u32 instdone[I915_NUM_INSTDONE_REG];
|
||||
};
|
||||
|
||||
struct intel_ringbuffer {
|
||||
struct intel_ring {
|
||||
struct drm_i915_gem_object *obj;
|
||||
void __iomem *virtual_start;
|
||||
void *vaddr;
|
||||
struct i915_vma *vma;
|
||||
|
||||
struct intel_engine_cs *engine;
|
||||
struct list_head link;
|
||||
|
||||
struct list_head request_list;
|
||||
|
||||
u32 head;
|
||||
u32 tail;
|
||||
int space;
|
||||
|
@ -146,8 +149,10 @@ struct intel_engine_cs {
|
|||
unsigned int exec_id;
|
||||
unsigned int hw_id;
|
||||
unsigned int guc_id; /* XXX same as hw_id? */
|
||||
u64 fence_context;
|
||||
u32 mmio_base;
|
||||
struct intel_ringbuffer *buffer;
|
||||
unsigned int irq_shift;
|
||||
struct intel_ring *buffer;
|
||||
struct list_head buffers;
|
||||
|
||||
/* Rather than have every client wait upon all user interrupts,
|
||||
|
@ -195,33 +200,34 @@ struct intel_engine_cs {
|
|||
|
||||
u32 irq_keep_mask; /* always keep these interrupts */
|
||||
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
|
||||
void (*irq_enable)(struct intel_engine_cs *ring);
|
||||
void (*irq_disable)(struct intel_engine_cs *ring);
|
||||
void (*irq_enable)(struct intel_engine_cs *engine);
|
||||
void (*irq_disable)(struct intel_engine_cs *engine);
|
||||
|
||||
int (*init_hw)(struct intel_engine_cs *ring);
|
||||
int (*init_hw)(struct intel_engine_cs *engine);
|
||||
|
||||
int (*init_context)(struct drm_i915_gem_request *req);
|
||||
|
||||
void (*write_tail)(struct intel_engine_cs *ring,
|
||||
u32 value);
|
||||
int __must_check (*flush)(struct drm_i915_gem_request *req,
|
||||
u32 invalidate_domains,
|
||||
u32 flush_domains);
|
||||
int (*add_request)(struct drm_i915_gem_request *req);
|
||||
int (*emit_flush)(struct drm_i915_gem_request *request,
|
||||
u32 mode);
|
||||
#define EMIT_INVALIDATE BIT(0)
|
||||
#define EMIT_FLUSH BIT(1)
|
||||
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
|
||||
int (*emit_bb_start)(struct drm_i915_gem_request *req,
|
||||
u64 offset, u32 length,
|
||||
unsigned int dispatch_flags);
|
||||
#define I915_DISPATCH_SECURE BIT(0)
|
||||
#define I915_DISPATCH_PINNED BIT(1)
|
||||
#define I915_DISPATCH_RS BIT(2)
|
||||
int (*emit_request)(struct drm_i915_gem_request *req);
|
||||
void (*submit_request)(struct drm_i915_gem_request *req);
|
||||
/* Some chipsets are not quite as coherent as advertised and need
|
||||
* an expensive kick to force a true read of the up-to-date seqno.
|
||||
* However, the up-to-date seqno is not always required and the last
|
||||
* seen value is good enough. Note that the seqno will always be
|
||||
* monotonic, even if not coherent.
|
||||
*/
|
||||
void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
|
||||
int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
|
||||
u64 offset, u32 length,
|
||||
unsigned dispatch_flags);
|
||||
#define I915_DISPATCH_SECURE 0x1
|
||||
#define I915_DISPATCH_PINNED 0x2
|
||||
#define I915_DISPATCH_RS 0x4
|
||||
void (*cleanup)(struct intel_engine_cs *ring);
|
||||
void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
|
||||
void (*cleanup)(struct intel_engine_cs *engine);
|
||||
|
||||
/* GEN8 signal/wait table - never trust comments!
|
||||
* signal to signal to signal to signal to signal to
|
||||
|
@ -274,12 +280,9 @@ struct intel_engine_cs {
|
|||
};
|
||||
|
||||
/* AKA wait() */
|
||||
int (*sync_to)(struct drm_i915_gem_request *to_req,
|
||||
struct intel_engine_cs *from,
|
||||
u32 seqno);
|
||||
int (*signal)(struct drm_i915_gem_request *signaller_req,
|
||||
/* num_dwords needed by caller */
|
||||
unsigned int num_dwords);
|
||||
int (*sync_to)(struct drm_i915_gem_request *req,
|
||||
struct drm_i915_gem_request *signal);
|
||||
int (*signal)(struct drm_i915_gem_request *req);
|
||||
} semaphore;
|
||||
|
||||
/* Execlists */
|
||||
|
@ -291,24 +294,6 @@ struct intel_engine_cs {
|
|||
unsigned int idle_lite_restore_wa;
|
||||
bool disable_lite_restore_wa;
|
||||
u32 ctx_desc_template;
|
||||
int (*emit_request)(struct drm_i915_gem_request *request);
|
||||
int (*emit_flush)(struct drm_i915_gem_request *request,
|
||||
u32 invalidate_domains,
|
||||
u32 flush_domains);
|
||||
int (*emit_bb_start)(struct drm_i915_gem_request *req,
|
||||
u64 offset, unsigned dispatch_flags);
|
||||
|
||||
/**
|
||||
* List of objects currently involved in rendering from the
|
||||
* ringbuffer.
|
||||
*
|
||||
* Includes buffers having the contents of their GPU caches
|
||||
* flushed, not necessarily primitives. last_read_req
|
||||
* represents when the rendering involved will be completed.
|
||||
*
|
||||
* A reference is held on the buffer while on this list.
|
||||
*/
|
||||
struct list_head active_list;
|
||||
|
||||
/**
|
||||
* List of breadcrumbs associated with GPU requests currently
|
||||
|
@ -323,11 +308,16 @@ struct intel_engine_cs {
|
|||
*/
|
||||
u32 last_submitted_seqno;
|
||||
|
||||
	bool gpu_caches_dirty;
	/* An RCU guarded pointer to the last request. No reference is
	 * held to the request, users must carefully acquire a reference to
	 * the request using i915_gem_active_get_request_rcu(), or hold the
	 * struct_mutex.
	 */
	struct i915_gem_active last_request;
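	/*
	 * Editor's sketch, not part of the diff: how this tracker is meant to
	 * be consumed, assuming the i915_gem_active helpers introduced
	 * alongside it in this series. Checking for outstanding work and
	 * waiting for idle both become queries on last_request rather than
	 * request-list walks:
	 *
	 *	if (i915_gem_active_isset(&engine->last_request))
	 *		... the engine still has requests in flight ...
	 *
	 *	ret = i915_gem_active_wait_unlocked(&engine->last_request,
	 *					    true, NULL, NULL);
	 */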
|
||||
|
||||
struct i915_gem_context *last_context;
|
||||
|
||||
struct intel_ring_hangcheck hangcheck;
|
||||
struct intel_engine_hangcheck hangcheck;
|
||||
|
||||
struct {
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
@ -338,7 +328,7 @@ struct intel_engine_cs {
|
|||
|
||||
/*
|
||||
* Table of commands the command parser needs to know about
|
||||
* for this ring.
|
||||
* for this engine.
|
||||
*/
|
||||
DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
|
||||
|
||||
|
@ -352,11 +342,11 @@ struct intel_engine_cs {
|
|||
* Returns the bitmask for the length field of the specified command.
|
||||
* Return 0 for an unrecognized/invalid command.
|
||||
*
|
||||
* If the command parser finds an entry for a command in the ring's
|
||||
* If the command parser finds an entry for a command in the engine's
|
||||
* cmd_tables, it gets the command's length based on the table entry.
|
||||
* If not, it calls this function to determine the per-ring length field
|
||||
* encoding for the command (i.e. certain opcode ranges use certain bits
|
||||
* to encode the command length in the header).
|
||||
* If not, it calls this function to determine the per-engine length
|
||||
* field encoding for the command (i.e. different opcode ranges use
|
||||
* certain bits to encode the command length in the header).
|
||||
*/
|
||||
u32 (*get_cmd_length_mask)(u32 cmd_header);
|
||||
};
|
||||
|
@ -374,8 +364,8 @@ intel_engine_flag(const struct intel_engine_cs *engine)
|
|||
}
|
||||
|
||||
static inline u32
|
||||
intel_ring_sync_index(struct intel_engine_cs *engine,
|
||||
struct intel_engine_cs *other)
|
||||
intel_engine_sync_index(struct intel_engine_cs *engine,
|
||||
struct intel_engine_cs *other)
|
||||
{
|
||||
int idx;
|
||||
|
||||
|
@ -437,55 +427,76 @@ intel_write_status_page(struct intel_engine_cs *engine,
|
|||
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
|
||||
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
|
||||
|
||||
struct intel_ringbuffer *
|
||||
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
|
||||
int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
|
||||
struct intel_ringbuffer *ringbuf);
|
||||
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
|
||||
void intel_ringbuffer_free(struct intel_ringbuffer *ring);
|
||||
struct intel_ring *
|
||||
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
|
||||
int intel_ring_pin(struct intel_ring *ring);
|
||||
void intel_ring_unpin(struct intel_ring *ring);
|
||||
void intel_ring_free(struct intel_ring *ring);
|
||||
|
||||
void intel_stop_engine(struct intel_engine_cs *engine);
|
||||
void intel_cleanup_engine(struct intel_engine_cs *engine);
|
||||
void intel_engine_stop(struct intel_engine_cs *engine);
|
||||
void intel_engine_cleanup(struct intel_engine_cs *engine);
|
||||
|
||||
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *engine,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
				       i915_reg_t reg)
{
	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);

int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
	*(uint32_t *)(ring->vaddr + ring->tail) = data;
	ring->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_ring *ring)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
}

static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	return value & (ring->size - 1);
}

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
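/*
 * Editor's sketch, not part of the diff: the emission pattern the helpers
 * above are designed for once a request carries its ring (req->ring), as
 * seen in the overlay conversion earlier in this merge. The function name
 * and the MI_NOOP payload are illustrative only.
 */
static int example_emit_noops(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring;
	int ret;

	ret = intel_ring_begin(req, 4);	/* reserve four dwords */
	if (ret)
		return ret;

	ring = req->ring;
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);	/* must balance intel_ring_begin() */

	return 0;
}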
|
||||
|
||||
void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
|
||||
|
||||
int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
|
||||
void intel_fini_pipe_control(struct intel_engine_cs *engine);
|
||||
|
||||
int intel_init_render_ring_buffer(struct drm_device *dev);
|
||||
int intel_init_bsd_ring_buffer(struct drm_device *dev);
|
||||
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
|
||||
int intel_init_blt_ring_buffer(struct drm_device *dev);
|
||||
int intel_init_vebox_ring_buffer(struct drm_device *dev);
|
||||
void intel_engine_setup_common(struct intel_engine_cs *engine);
|
||||
int intel_engine_init_common(struct intel_engine_cs *engine);
|
||||
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
|
||||
|
||||
u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
|
||||
static inline int intel_engine_idle(struct intel_engine_cs *engine,
|
||||
bool interruptible)
|
||||
{
|
||||
/* Wait upon the last request to be completed */
|
||||
return i915_gem_active_wait_unlocked(&engine->last_request,
|
||||
interruptible, NULL, NULL);
|
||||
}
|
||||
|
||||
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
|
||||
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
|
||||
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
|
||||
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
|
||||
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
|
||||
|
||||
u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
|
||||
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
|
||||
{
|
||||
return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
|
||||
|
@ -493,11 +504,6 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
|
|||
|
||||
int init_workarounds_ring(struct intel_engine_cs *engine);
|
||||
|
||||
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
|
||||
{
|
||||
return ringbuf->tail;
|
||||
}
|
||||
|
||||
/*
|
||||
* Arbitrary size for largest possible 'add request' sequence. The code paths
|
||||
* are complex and variable. Empirical measurement shows that the worst case
|
||||
|
@ -513,17 +519,6 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
|
|||
}
|
||||
|
||||
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
|
||||
struct intel_wait {
|
||||
struct rb_node node;
|
||||
struct task_struct *tsk;
|
||||
u32 seqno;
|
||||
};
|
||||
|
||||
struct intel_signal_node {
|
||||
struct rb_node node;
|
||||
struct intel_wait wait;
|
||||
};
|
||||
|
||||
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
|
||||
|
||||
static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
|
||||
|
@ -570,4 +565,9 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
|
|||
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
|
||||
unsigned int intel_kick_signalers(struct drm_i915_private *i915);
|
||||
|
||||
static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
|
||||
{
|
||||
return i915_gem_active_isset(&engine->last_request);
|
||||
}
|
||||
|
||||
#endif /* _INTEL_RINGBUFFER_H_ */
|
||||
|
|
|
@ -36,6 +36,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

@ -430,7 +431,7 @@ vlv_update_plane(struct drm_plane *dplane,
         */
        sprctl |= SP_GAMMA_ENABLE;

        if (obj->tiling_mode != I915_TILING_NONE)
        if (i915_gem_object_is_tiled(obj))
                sprctl |= SP_TILED;

        /* Sizes are 0 based */

@ -467,7 +468,7 @@ vlv_update_plane(struct drm_plane *dplane,
        I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
        I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);

        if (obj->tiling_mode != I915_TILING_NONE)
        if (i915_gem_object_is_tiled(obj))
                I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
        else
                I915_WRITE(SPLINOFF(pipe, plane), linear_offset);

@ -552,7 +553,7 @@ ivb_update_plane(struct drm_plane *plane,
         */
        sprctl |= SPRITE_GAMMA_ENABLE;

        if (obj->tiling_mode != I915_TILING_NONE)
        if (i915_gem_object_is_tiled(obj))
                sprctl |= SPRITE_TILED;

        if (IS_HASWELL(dev) || IS_BROADWELL(dev))

@ -606,7 +607,7 @@ ivb_update_plane(struct drm_plane *plane,
         * register */
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
        else if (obj->tiling_mode != I915_TILING_NONE)
        else if (i915_gem_object_is_tiled(obj))
                I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
        else
                I915_WRITE(SPRLINOFF(pipe), linear_offset);

@ -693,7 +694,7 @@ ilk_update_plane(struct drm_plane *plane,
         */
        dvscntr |= DVS_GAMMA_ENABLE;

        if (obj->tiling_mode != I915_TILING_NONE)
        if (i915_gem_object_is_tiled(obj))
                dvscntr |= DVS_TILED;

        if (IS_GEN6(dev))

@ -736,7 +737,7 @@ ilk_update_plane(struct drm_plane *plane,
        I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
        I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);

        if (obj->tiling_mode != I915_TILING_NONE)
        if (i915_gem_object_is_tiled(obj))
                I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
        else
                I915_WRITE(DVSLINOFF(pipe), linear_offset);
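Each hunk above replaces an open-coded obj->tiling_mode test with the i915_gem_object_is_tiled() helper. The helper is defined in the i915 GEM headers rather than in this file; a minimal sketch of the shape it presumably takes (the real version may read a packed tiling/stride field instead of a plain tiling_mode member):

/*
 * Sketch only: the actual i915_gem_object_is_tiled() lives in the i915
 * headers and may be implemented against a packed tiling/stride field
 * rather than the tiling_mode member shown here.
 */
static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_mode != I915_TILING_NONE;
}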
drivers/gpu/drm/i915/intel_uncore.c
@ -435,7 +435,7 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
        i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev_priv);
        intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,

@ -1618,8 +1618,10 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
 * @timeout_ms: timeout in millisecond
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until
 * (I915_READ_FW(@reg) & @mask) == @value
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is

@ -1652,8 +1654,10 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
 * @timeout_ms: timeout in millisecond
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until
 * (I915_READ(@reg) & @mask) == @value
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
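The reworked kernel-doc above documents the wait as a masked compare against the register with a timeout. Purely for illustration, that condition amounts to a bounded poll of the following shape; this is a simplified sketch, not the driver's implementation, which additionally handles forcewake, fast busy-waiting and unit details:

/*
 * Simplified illustration of the documented wait condition.  The real
 * intel_wait_for_register*() helpers are more careful; this sketch only
 * shows the (read & mask) == value test bounded by a timeout.
 */
static int example_wait_for_register(struct drm_i915_private *dev_priv,
                                     i915_reg_t reg, u32 mask, u32 value,
                                     unsigned int timeout_ms)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

        do {
                if ((I915_READ(reg) & mask) == value)
                        return 0;
                usleep_range(10, 50);   /* back off between polls */
        } while (time_before(jiffies, timeout));

        return -ETIMEDOUT;
}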
include/uapi/drm/i915_drm.h
@ -62,6 +62,30 @@ extern "C" {
#define I915_ERROR_UEVENT "ERROR"
#define I915_RESET_UEVENT "RESET"

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
        /*
         * Not cached anywhere, coherency between CPU and GPU accesses is
         * guaranteed.
         */
        I915_MOCS_UNCACHED,
        /*
         * Cacheability and coherency controlled by the kernel automatically
         * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
         * usage of the surface (used for display scanout or not).
         */
        I915_MOCS_PTE,
        /*
         * Cached in all GPU caches available on the platform.
         * Coherency between CPU and GPU accesses to the surface is not
         * guaranteed without extra synchronization.
         */
        I915_MOCS_CACHED,
};

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
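The new enum exposes three fixed entries of the kernel-managed MOCS table. As a purely illustrative, hypothetical helper (not part of the uapi) showing how a user might choose between them based on the comments above:

#include <stdbool.h>
#include <drm/i915_drm.h>

/* Hypothetical helper, not part of the uapi: picks a MOCS index based on
 * how the surface will be used, following the comments in the enum. */
static enum i915_mocs_table_index
example_pick_mocs(bool needs_cpu_coherency, bool may_be_scanned_out)
{
        if (needs_cpu_coherency)
                return I915_MOCS_UNCACHED;      /* CPU/GPU coherency guaranteed */
        if (may_be_scanned_out)
                return I915_MOCS_PTE;           /* follow the kernel's caching mode */
        return I915_MOCS_CACHED;                /* GPU caches; sync with the CPU explicitly */
}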
@ -698,15 +722,20 @@ struct drm_i915_gem_exec_object2 {
         */
        __u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED (1<<4)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1)
#define EXEC_OBJECT_PINNED (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PAD_TO_SIZE<<1)
        __u64 flags;

        __u64 rsvd1;
        union {
                __u64 rsvd1;
                __u64 pad_to_size;
        };
        __u64 rsvd2;
};

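With EXEC_OBJECT_PAD_TO_SIZE, the former rsvd1 slot doubles as pad_to_size, letting userspace request that the object's GTT mapping be padded to at least that size. A hedged userspace sketch of filling in one exec object this way (the handle and size values are made up for illustration):

#include <string.h>
#include <drm/i915_drm.h>

/* Illustrative only: a real caller gets the handle from GEM object
 * creation and the padded size from its own allocator. */
static void example_fill_exec_object(struct drm_i915_gem_exec_object2 *obj,
                                     __u32 handle, __u64 padded_size)
{
        memset(obj, 0, sizeof(*obj));
        obj->handle = handle;
        obj->flags = EXEC_OBJECT_PAD_TO_SIZE;   /* request extra GTT padding */
        obj->pad_to_size = padded_size;         /* aliases the old rsvd1 field */
}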
@ -897,6 +926,7 @@ struct drm_i915_gem_caching {
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
#define I915_TILING_LAST I915_TILING_Y

#define I915_BIT_6_SWIZZLE_NONE 0
#define I915_BIT_6_SWIZZLE_9 1
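The added I915_TILING_LAST gives validation code an explicit upper bound for tiling modes supplied by userspace. A minimal sketch of such a bounds check (the driver's actual set-tiling validation may be stricter or structured differently):

/* Sketch of a bounds check built on the new define; kernel-side code has
 * __u32 available via linux/types.h. */
static bool example_tiling_mode_is_valid(__u32 tiling_mode)
{
        return tiling_mode <= I915_TILING_LAST;
}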