Preparing the ringbuffer for adding new commands can fail (a timeout whilst waiting for the GPU to catch up and free some space). So check for any potential error before overwriting HEAD with new commands, and propagate that error back to the user where possible.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
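As a minimal sketch of the calling convention this change introduces (the wrapper function below is hypothetical; MI_FLUSH and MI_NOOP come from i915_reg.h, while intel_ring_begin(), intel_ring_emit() and intel_ring_advance() are declared in this header):

static int example_emit_flush(struct intel_ring_buffer *ring)
{
	int ret;

	/* Reserve space for two dwords; this can now fail, e.g. with
	 * -EBUSY if waiting for the GPU to free ring space times out. */
	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;	/* propagate the error to the caller */

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}
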
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	u32 __iomem *page_addr;
	unsigned int gfx_addr;
	struct drm_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

struct drm_i915_gem_execbuffer2;
struct intel_ring_buffer {
	const char *name;
	enum intel_ring_id {
		RING_RENDER = 0x1,
		RING_BSD = 0x2,
		RING_BLT = 0x4,
	} id;
	u32 mmio_base;
	unsigned long size;
	void *virtual_start;
	struct drm_device *dev;
	struct drm_gem_object *gem_object;

	unsigned int head;
	unsigned int tail;
	int space;
	struct intel_hw_status_page status_page;

	u32 irq_gem_seqno;	/* last seqno seen at irq time */
	u32 waiting_gem_seqno;
	int user_irq_refcount;
	void (*user_irq_get)(struct intel_ring_buffer *ring);
	void (*user_irq_put)(struct intel_ring_buffer *ring);

	int (*init)(struct intel_ring_buffer *ring);

	void (*write_tail)(struct intel_ring_buffer *ring,
			   u32 value);
	void (*flush)(struct intel_ring_buffer *ring,
		      u32 invalidate_domains,
		      u32 flush_domains);
	u32 (*add_request)(struct intel_ring_buffer *ring,
			   u32 flush_domains);
	u32 (*get_seqno)(struct intel_ring_buffer *ring);
	int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
				   struct drm_i915_gem_execbuffer2 *exec,
				   struct drm_clip_rect *cliprects,
				   uint64_t exec_offset);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or the flushing_list; last_rendering_seqno can
	 * be used to differentiate between the two lists.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	bool outstanding_lazy_request;

	wait_queue_head_t irq_queue;
	drm_local_map_t map;
};

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	return ioread32(ring->status_page.page_addr + reg);
}
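
/*
 * Example usage (illustrative; I915_GEM_HWS_INDEX is the dword index
 * the driver uses for breadcrumb writes, defined in i915_drv.h):
 *
 *	seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *
 * reads the last breadcrumb the GPU wrote to the hardware status page,
 * avoiding an uncached MMIO register read.
 */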

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
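
/*
 * Note: intel_ring_emit() does no space checking of its own; callers
 * must first reserve the dwords they need with a successful
 * intel_ring_begin(), as in the sketch above, so that a wait timeout
 * is reported before any commands are written.
 */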

void intel_ring_advance(struct intel_ring_buffer *ring);

u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

#endif /* _INTEL_RINGBUFFER_H_ */