drm/i915: Store a i915 backpointer from engine, and use it
   text    data     bss      dec    hex filename
6309351 3578714  696320 10584385 a18141 vmlinux
6308391 3578714  696320 10583425 a17d81 vmlinux

Almost 1KiB of code reduction.

v2: More s/INTEL_INFO()->gen/INTEL_GEN()/ and IS_GENx() conversions

   text    data     bss      dec    hex filename
6304579 3578778  696320 10579677 a16edd vmlinux
6303427 3578778  696320 10578525 a16a5d vmlinux

Now over 1KiB!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1462545621-30125-3-git-send-email-chris@chris-wilson.co.uk
commit c033666a94
parent e1382efb60
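The shape of the change, as a standalone sketch: the struct layouts and the INTEL_GEN() macro below are simplified stand-ins for the real i915 definitions, written only to show why caching the backpointer shrinks code. Every converted call site used to chase two pointers (engine->dev, then dev->dev_private inside to_i915()/INTEL_INFO()); storing dev_priv in the engine makes it a single load.

/* Sketch only: stand-in types modelled on this refactor, not the
 * kernel's actual definitions. */
#include <stdio.h>

struct drm_i915_private { int gen; };
struct drm_device { struct drm_i915_private *dev_private; };

struct intel_engine_cs {
	struct drm_device *dev;		/* old route: two dependent loads */
	struct drm_i915_private *i915;	/* new route: one load */
};

/* INTEL_GEN() takes dev_priv directly, so converted call sites such as
 * IS_GEN7(engine->i915) or INTEL_GEN(dev_priv) >= 8 skip to_i915(). */
#define INTEL_GEN(dev_priv) ((dev_priv)->gen)

int main(void)
{
	struct drm_i915_private i915 = { .gen = 7 };
	struct drm_device dev = { .dev_private = &i915 };
	struct intel_engine_cs engine = { .dev = &dev, .i915 = &i915 };

	/* before: INTEL_INFO(engine->dev)->gen */
	printf("old: gen=%d\n", engine.dev->dev_private->gen);
	/* after: INTEL_GEN(engine->i915) */
	printf("new: gen=%d\n", INTEL_GEN(engine.i915));
	return 0;
}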
@@ -751,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(engine->dev))
+	if (!IS_GEN7(engine->i915))
 		return 0;
 
 	switch (engine->id) {
 	case RCS:
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			cmd_tables = hsw_render_ring_cmds;
 			cmd_table_count =
 				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -765,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
 		}
 
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			engine->reg_tables = hsw_render_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
 		} else {
@@ -781,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			cmd_tables = hsw_blt_ring_cmds;
 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
 		} else {
@@ -789,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
 		}
 
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine->i915)) {
 			engine->reg_tables = hsw_blt_reg_tables;
 			engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
 		} else {
@@ -1036,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
 	if (!engine->needs_cmd_parser)
 		return false;
 
-	if (!USES_PPGTT(engine->dev))
+	if (!USES_PPGTT(engine->i915))
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
@@ -1380,7 +1380,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 		seqno[id] = engine->get_seqno(engine);
 	}
 
-	i915_get_extra_instdone(dev, instdone);
+	i915_get_extra_instdone(dev_priv, instdone);
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -3157,7 +3157,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 	enum intel_engine_id id;
 	int j, ret;
 
-	if (!i915_semaphore_is_enabled(dev)) {
+	if (!i915_semaphore_is_enabled(dev_priv)) {
 		seq_puts(m, "Semaphores are disabled\n");
 		return 0;
 	}
@@ -4757,7 +4757,7 @@ i915_wedged_set(void *data, u64 val)
 
 	intel_runtime_pm_get(dev_priv);
 
-	i915_handle_error(dev, val,
+	i915_handle_error(dev_priv, val,
 			  "Manually setting wedged to %llu", val);
 
 	intel_runtime_pm_put(dev_priv);
@@ -4907,7 +4907,7 @@ i915_drop_caches_set(void *data, u64 val)
 	}
 
 	if (val & (DROP_RETIRE | DROP_ACTIVE))
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev_priv);
 
 	if (val & DROP_BOUND)
 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = 1;
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
-		value = i915_semaphore_is_enabled(dev);
+		value = i915_semaphore_is_enabled(dev_priv);
 		break;
 	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
 		value = 1;
@@ -970,7 +970,8 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
 			 info->has_eu_pg ? "y" : "n");
 
 	i915.enable_execlists =
-		intel_sanitize_enable_execlists(dev, i915.enable_execlists);
+		intel_sanitize_enable_execlists(dev_priv,
+						i915.enable_execlists);
 
 	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
@@ -979,7 +980,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
	 * than every time we check intel_enable_ppgtt().
	 */
 	i915.enable_ppgtt =
-		intel_sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
 	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 }
 
@@ -1345,7 +1346,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
 
 	i915_setup_sysfs(dev);
@@ -530,9 +530,9 @@ void intel_detect_pch(struct drm_device *dev)
 	pci_dev_put(pch);
 }
 
-bool i915_semaphore_is_enabled(struct drm_device *dev)
+bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		return false;
 
 	if (i915.semaphores >= 0)
@@ -544,7 +544,7 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
 		return false;
 #endif
 
@@ -914,9 +914,9 @@ int i915_resume_switcheroo(struct drm_device *dev)
 *   - re-init interrupt state
 *   - re-init display
 */
-int i915_reset(struct drm_device *dev)
+int i915_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
 	struct i915_gpu_error *error = &dev_priv->gpu_error;
 	unsigned reset_counter;
 	int ret;
@@ -2754,7 +2754,8 @@ extern int i915_max_ioctl;
 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
-int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt);
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+				int enable_ppgtt);
 
 /* i915_dma.c */
 void __printf(3, 4)
@@ -2778,7 +2779,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 #endif
 extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_device *dev);
-extern int i915_reset(struct drm_device *dev);
+extern int i915_reset(struct drm_i915_private *dev_priv);
 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2796,9 +2797,10 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
 
 /* i915_irq.c */
-void i915_queue_hangcheck(struct drm_device *dev);
+void i915_queue_hangcheck(struct drm_i915_private *dev_priv);
 __printf(3, 4)
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+		       u32 engine_mask,
 		       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
@@ -2828,9 +2830,9 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-static inline bool intel_vgpu_active(struct drm_device *dev)
+static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
 {
-	return to_i915(dev)->vgpu.active;
+	return dev_priv->vgpu.active;
 }
 
 void
@@ -3098,13 +3100,13 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 				 req->seqno);
 }
 
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
-bool i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3351,9 +3353,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 
 /* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
-	if (INTEL_INFO(dev)->gen < 6)
+	if (INTEL_GEN(dev_priv) < 6)
 		intel_gtt_chipset_flush();
 }
 
@@ -3432,14 +3434,15 @@ static inline void i915_error_state_buf_release(
 {
 	kfree(eb->buf);
 }
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+			      u32 engine_mask,
 			      const char *error_msg);
 void i915_error_state_get(struct drm_device *dev,
 			  struct i915_error_state_file_priv *error_priv);
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
 void i915_destroy_error_state(struct drm_device *dev);
 
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
@@ -3550,18 +3553,20 @@ extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_enable_rc6(const struct drm_device *dev);
 
-extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
 
 /* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);
 
-extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 		vaddr += PAGE_SIZE;
 	}
 
-	i915_gem_chipset_flush(obj->base.dev);
+	i915_gem_chipset_flush(to_i915(obj->base.dev));
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
@@ -347,7 +347,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	}
 
 	drm_clflush_virt_range(vaddr, args->size);
-	i915_gem_chipset_flush(dev);
+	i915_gem_chipset_flush(to_i915(dev));
 
 out:
 	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -1006,7 +1006,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	}
 
 	if (needs_clflush_after)
-		i915_gem_chipset_flush(dev);
+		i915_gem_chipset_flush(to_i915(dev));
 	else
 		obj->cache_dirty = true;
 
@@ -1230,8 +1230,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
			struct intel_rps_client *rps)
 {
 	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1429,7 +1428,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *tmp;
 
-	lockdep_assert_held(&engine->dev->struct_mutex);
+	lockdep_assert_held(&engine->i915->dev->struct_mutex);
 
 	if (list_empty(&req->list))
 		return;
@@ -2505,9 +2504,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 }
 
 static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	int ret;
 
@@ -2517,7 +2515,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 		if (ret)
 			return ret;
 	}
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev_priv);
 
 	/* Finally reset hw state */
 	for_each_engine(engine, dev_priv)
@@ -2537,7 +2535,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
-	ret = i915_gem_init_seqno(dev, seqno - 1);
+	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
 	if (ret)
 		return ret;
 
@@ -2553,13 +2551,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 }
 
 int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	/* reserve 0 for non-seqno */
 	if (dev_priv->next_seqno == 0) {
-		int ret = i915_gem_init_seqno(dev, 0);
+		int ret = i915_gem_init_seqno(dev_priv, 0);
 		if (ret)
 			return ret;
 
@@ -2657,7 +2653,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
 
-	i915_queue_hangcheck(engine->dev);
+	i915_queue_hangcheck(engine->i915);
 
 	queue_delayed_work(dev_priv->wq,
			   &dev_priv->mm.retire_work,
@@ -2731,7 +2727,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
			 struct intel_context *ctx,
			 struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
 	struct drm_i915_gem_request *req;
 	int ret;
@@ -2753,7 +2749,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	if (req == NULL)
 		return -ENOMEM;
 
-	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+	ret = i915_gem_get_seqno(engine->i915, &req->seqno);
 	if (ret)
 		goto err;
 
@@ -2810,7 +2806,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	int err;
 
 	if (ctx == NULL)
-		ctx = to_i915(engine->dev)->kernel_context;
+		ctx = engine->i915->kernel_context;
 	err = __i915_gem_request_alloc(engine, ctx, &req);
 	return err ? ERR_PTR(err) : req;
 }
@@ -2985,9 +2981,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 }
 
 bool
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 	bool idle = true;
 
@@ -3020,7 +3015,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	/* Come back later if the device is busy... */
 	idle = false;
 	if (mutex_trylock(&dev->struct_mutex)) {
-		idle = i915_gem_retire_requests(dev);
+		idle = i915_gem_retire_requests(dev_priv);
 		mutex_unlock(&dev->struct_mutex);
 	}
 	if (!idle)
@@ -3189,7 +3184,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (i915_gem_request_completed(from_req, true))
 		return 0;
 
-	if (!i915_semaphore_is_enabled(obj->base.dev)) {
+	if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
 		ret = __i915_wait_request(from_req,
					  i915->mm.interruptible,
@@ -3722,7 +3717,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 		return;
 
 	if (i915_gem_clflush_object(obj, obj->pin_display))
-		i915_gem_chipset_flush(obj->base.dev);
+		i915_gem_chipset_flush(to_i915(obj->base.dev));
 
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
@@ -3920,7 +3915,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
 	    cpu_write_needs_clflush(obj)) {
 		if (i915_gem_clflush_object(obj, true))
-			i915_gem_chipset_flush(obj->base.dev);
+			i915_gem_chipset_flush(to_i915(obj->base.dev));
 	}
 
 	return 0;
@@ -4698,7 +4693,7 @@ i915_gem_suspend(struct drm_device *dev)
 	if (ret)
 		goto err;
 
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev_priv);
 
 	i915_gem_stop_engines(dev);
 	i915_gem_context_lost(dev_priv);
@@ -4989,7 +4984,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 	else
 		dev_priv->num_fence_regs = 8;
 
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));
 
@@ -99,28 +99,27 @@
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static size_t get_context_alignment(struct drm_device *dev)
+static size_t get_context_alignment(struct drm_i915_private *dev_priv)
 {
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev_priv))
 		return GEN6_CONTEXT_ALIGN;
 
 	return GEN7_CONTEXT_ALIGN;
 }
 
-static int get_context_size(struct drm_device *dev)
+static int get_context_size(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 	u32 reg;
 
-	switch (INTEL_INFO(dev)->gen) {
+	switch (INTEL_GEN(dev_priv)) {
 	case 6:
 		reg = I915_READ(CXT_SIZE);
 		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
 		break;
 	case 7:
 		reg = I915_READ(GEN7_CXT_SIZE);
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(dev_priv))
 			ret = HSW_CXT_TOTAL_SIZE;
 		else
 			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -224,7 +223,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
-		i915_gem_retire_requests(dev_priv->dev);
+		i915_gem_retire_requests(dev_priv);
 		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
 		if (ret < 0)
@@ -320,7 +319,7 @@ i915_gem_create_context(struct drm_device *dev,
		 * context.
		 */
 		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(dev), 0);
+					    get_context_alignment(to_i915(dev)), 0);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
 			goto err_destroy;
@@ -389,7 +388,8 @@ int i915_gem_context_init(struct drm_device *dev)
 	if (WARN_ON(dev_priv->kernel_context))
 		return 0;
 
-	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+	if (intel_vgpu_active(dev_priv) &&
+	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		if (!i915.enable_execlists) {
 			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
 			return -EINVAL;
@@ -404,8 +404,9 @@ int i915_gem_context_init(struct drm_device *dev)
 		/* NB: intentionally left blank. We will allocate our own
		 * backing objects as we need them, thank you very much */
 		dev_priv->hw_context_size = 0;
-	} else if (HAS_HW_CONTEXTS(dev)) {
-		dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+	} else if (HAS_HW_CONTEXTS(dev_priv)) {
+		dev_priv->hw_context_size =
+			round_up(get_context_size(dev_priv), 4096);
 		if (dev_priv->hw_context_size > (1<<20)) {
 			DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
					 dev_priv->hw_context_size);
@@ -509,12 +510,13 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
-		i915_semaphore_is_enabled(engine->dev) ?
-		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+		i915_semaphore_is_enabled(dev_priv) ?
+		hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
 		0;
 	int len, ret;
 
@@ -523,21 +525,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
-	if (IS_GEN6(engine->dev)) {
+	if (IS_GEN6(dev_priv)) {
 		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(engine->dev)->gen < 8)
+	else if (INTEL_GEN(dev_priv) < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
 	len = 4;
-	if (INTEL_INFO(engine->dev)->gen >= 7)
+	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
 	ret = intel_ring_begin(req, len);
@@ -545,14 +547,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_GEN(dev_priv) >= 7) {
 		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, to_i915(engine->dev)) {
+			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;
 
@@ -575,14 +577,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
	 */
 	intel_ring_emit(engine, MI_NOOP);
 
-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
 			intel_ring_emit(engine,
					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, to_i915(engine->dev)) {
+			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;
 
@@ -673,7 +675,7 @@ needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
 	if (engine->id != RCS)
 		return true;
 
-	if (INTEL_INFO(engine->dev)->gen < 8)
+	if (INTEL_GEN(engine->i915) < 8)
 		return true;
 
 	return false;
@@ -710,7 +712,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 
 	/* Trying to pin first makes error handling easier. */
 	ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-				    get_context_alignment(engine->dev),
+				    get_context_alignment(engine->i915),
				    0);
 	if (ret)
 		return ret;
@@ -154,7 +154,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(to_i915(dev));
 		goto search_again;
 	}
 
@@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 		if (ret)
 			return ret;
 
-		i915_gem_retire_requests(vm->dev);
+		i915_gem_retire_requests(to_i915(vm->dev));
 
 		WARN_ON(!list_empty(&vm->active_list));
 	}
@@ -724,7 +724,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
 	int retry;
 
 	i915_gem_retire_requests_ring(engine);
@@ -965,7 +965,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(req->engine->dev);
+		i915_gem_chipset_flush(req->engine->i915);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -1119,7 +1119,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(engine->dev);
+				struct drm_i915_private *dev_priv = engine->i915;
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
					       &dev_priv->mm.fence_list);
 			}
@@ -110,17 +110,19 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
 	.type = I915_GGTT_VIEW_ROTATED,
 };
 
-int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+				int enable_ppgtt)
 {
 	bool has_aliasing_ppgtt;
 	bool has_full_ppgtt;
 	bool has_full_48bit_ppgtt;
 
-	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
-	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
-	has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
+	has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
+	has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
+	has_full_48bit_ppgtt =
+		IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
 
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		has_full_ppgtt = false; /* emulation is too hard */
 
 	if (!has_aliasing_ppgtt)
@@ -130,7 +132,7 @@ int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
-	if (enable_ppgtt == 0 && INTEL_INFO(dev)->gen < 9)
+	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
 		return 0;
 
 	if (enable_ppgtt == 1)
@@ -144,19 +146,19 @@ int intel_sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 
 #ifdef CONFIG_INTEL_IOMMU
 	/* Disable ppgtt on SNB if VT-d is on. */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
 		DRM_INFO("Disabling PPGTT because VT-d is on\n");
 		return 0;
 	}
 #endif
 
 	/* Early VLV doesn't have this */
-	if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
+	if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
 		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
 		return 0;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
 		return has_full_48bit_ppgtt ? 3 : 2;
 	else
 		return has_aliasing_ppgtt ? 1 : 0;
@@ -994,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 
-	if (intel_vgpu_active(vm->dev))
+	if (intel_vgpu_active(to_i915(vm->dev)))
 		gen8_ppgtt_notify_vgt(ppgtt, false);
 
 	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@@ -1545,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
						0, 0,
						GEN8_PML4E_SHIFT);
 
-		if (intel_vgpu_active(ppgtt->base.dev)) {
+		if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
 			ret = gen8_preallocate_top_level_pdps(ppgtt);
 			if (ret)
 				goto free_scratch;
 		}
 	}
 
-	if (intel_vgpu_active(ppgtt->base.dev))
+	if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
 		gen8_ppgtt_notify_vgt(ppgtt, true);
 
 	return 0;
@@ -2080,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	} else
 		BUG();
 
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		ppgtt->switch_mm = vgpu_mm_switch;
 
 	ret = gen6_ppgtt_alloc(ppgtt);
@@ -2729,7 +2731,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 	i915_address_space_init(&ggtt->base, dev_priv);
 	ggtt->base.total += PAGE_SIZE;
 
-	if (intel_vgpu_active(dev)) {
+	if (intel_vgpu_active(dev_priv)) {
 		ret = intel_vgt_balloon(dev);
 		if (ret)
 			return ret;
@@ -2833,7 +2835,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
 	i915_gem_cleanup_stolen(dev);
 
 	if (drm_mm_initialized(&ggtt->base.mm)) {
-		if (intel_vgpu_active(dev))
+		if (intel_vgpu_active(dev_priv))
 			intel_vgt_deballoon();
 
 		drm_mm_takedown(&ggtt->base.mm);
@@ -29,7 +29,7 @@
 #include "intel_renderstate.h"
 
 static const struct intel_renderstate_rodata *
-render_state_get_rodata(struct drm_device *dev, const int gen)
+render_state_get_rodata(const int gen)
 {
 	switch (gen) {
 	case 6:
@@ -45,19 +45,20 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
 	return NULL;
 }
 
-static int render_state_init(struct render_state *so, struct drm_device *dev)
+static int render_state_init(struct render_state *so,
+			     struct drm_i915_private *dev_priv)
 {
 	int ret;
 
-	so->gen = INTEL_INFO(dev)->gen;
-	so->rodata = render_state_get_rodata(dev, so->gen);
+	so->gen = INTEL_GEN(dev_priv);
+	so->rodata = render_state_get_rodata(so->gen);
 	if (so->rodata == NULL)
 		return 0;
 
 	if (so->rodata->batch_items * 4 > 4096)
 		return -EINVAL;
 
-	so->obj = i915_gem_object_create(dev, 4096);
+	so->obj = i915_gem_object_create(dev_priv->dev, 4096);
 	if (IS_ERR(so->obj))
 		return PTR_ERR(so->obj);
 
@@ -177,7 +178,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 	if (WARN_ON(engine->id != RCS))
 		return -ENOENT;
 
-	ret = render_state_init(so, engine->dev);
+	ret = render_state_init(so, engine->i915);
 	if (ret)
 		return ret;
 
@@ -131,7 +131,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	unsigned long count = 0;
 
 	trace_i915_gem_shrink(dev_priv, target, flags);
-	i915_gem_retire_requests(dev_priv->dev);
+	i915_gem_retire_requests(dev_priv);
 
 	/*
	 * Unbinding of objects will require HW access; Let us not wake the
@@ -209,7 +209,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	if (flags & I915_SHRINK_BOUND)
 		intel_runtime_pm_put(dev_priv);
 
-	i915_gem_retire_requests(dev_priv->dev);
+	i915_gem_retire_requests(dev_priv);
 
 	return count;
 }
@@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 	return error_code;
 }
 
-static void i915_gem_record_fences(struct drm_device *dev,
+static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	if (IS_GEN3(dev) || IS_GEN2(dev)) {
+	if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
 		for (i = 0; i < dev_priv->num_fence_regs; i++)
 			error->fence[i] = I915_READ(FENCE_REG(i));
-	} else if (IS_GEN5(dev) || IS_GEN4(dev)) {
+	} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
 		for (i = 0; i < dev_priv->num_fence_regs; i++)
 			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
-	} else if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (INTEL_GEN(dev_priv) >= 6) {
 		for (i = 0; i < dev_priv->num_fence_regs; i++)
 			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
 	}
@@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 	struct intel_engine_cs *to;
 	enum intel_engine_id id;
 
-	if (!i915_semaphore_is_enabled(dev_priv->dev))
+	if (!i915_semaphore_is_enabled(dev_priv))
 		return;
 
 	if (!error->semaphore_obj)
@@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void i915_record_ring_state(struct drm_device *dev,
+static void i915_record_ring_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *engine,
				   struct drm_i915_error_ring *ering)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (INTEL_INFO(dev)->gen >= 6) {
+	if (INTEL_GEN(dev_priv) >= 6) {
 		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
 		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
-		if (INTEL_INFO(dev)->gen >= 8)
+		if (INTEL_GEN(dev_priv) >= 8)
 			gen8_record_semaphore_state(dev_priv, error, engine,
						    ering);
 		else
 			gen6_record_semaphore_state(dev_priv, engine, ering);
 	}
 
-	if (INTEL_INFO(dev)->gen >= 4) {
+	if (INTEL_GEN(dev_priv) >= 4) {
 		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
 		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
 		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
 		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
 		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
 		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
-		if (INTEL_INFO(dev)->gen >= 8) {
+		if (INTEL_GEN(dev_priv) >= 8) {
 			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
 			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
 		}
@@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev,
 	ering->tail = I915_READ_TAIL(engine);
 	ering->ctl = I915_READ_CTL(engine);
 
-	if (I915_NEED_GFX_HWS(dev)) {
+	if (I915_NEED_GFX_HWS(dev_priv)) {
 		i915_reg_t mmio;
 
-		if (IS_GEN7(dev)) {
+		if (IS_GEN7(dev_priv)) {
 			switch (engine->id) {
 			default:
 			case RCS:
@@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev,
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
-		} else if (IS_GEN6(engine->dev)) {
+		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
@@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev,
 	ering->hangcheck_score = engine->hangcheck.score;
 	ering->hangcheck_action = engine->hangcheck.action;
 
-	if (USES_PPGTT(dev)) {
+	if (USES_PPGTT(dev_priv)) {
 		int i;
 
 		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
-		if (IS_GEN6(dev))
+		if (IS_GEN6(dev_priv))
 			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
-		else if (IS_GEN7(dev))
+		else if (IS_GEN7(dev_priv))
 			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
-		else if (INTEL_INFO(dev)->gen >= 8)
+		else if (INTEL_GEN(dev_priv) >= 8)
 			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_object *obj;
 
 	/* Currently render ring is the only HW context user */
@@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 	}
 }
 
-static void i915_gem_record_rings(struct drm_device *dev,
+static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct drm_i915_error_state *error)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_request *request;
 	int i, count;
@@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
 
 		error->ring[i].pid = -1;
 
-		if (engine->dev == NULL)
+		if (!intel_engine_initialized(engine))
 			continue;
 
 		error->ring[i].valid = true;
 
-		i915_record_ring_state(dev, error, engine, &error->ring[i]);
+		i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
 
 		request = i915_gem_find_active_request(engine);
 		if (request) {
@@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 
-	i915_get_extra_instdone(dev, error->extra_instdone);
+	i915_get_extra_instdone(dev_priv, error->extra_instdone);
 }
 
-static void i915_error_capture_msg(struct drm_device *dev,
+static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   u32 engine_mask,
				   const char *error_msg)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ecode;
 	int ring_id = -1, len;
 
@@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
 
 	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
-			INTEL_INFO(dev)->gen, ring_id, ecode);
+			INTEL_GEN(dev_priv), ring_id, ecode);
 
 	if (ring_id != -1 && error->ring[ring_id].pid != -1)
 		len += scnprintf(error->error_msg + len,
@@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+			      u32 engine_mask,
			      const char *error_msg)
 {
 	static bool warned;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error;
 	unsigned long flags;
 
@@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
 	i915_capture_gen_state(dev_priv, error);
 	i915_capture_reg_state(dev_priv, error);
 	i915_gem_capture_buffers(dev_priv, error);
-	i915_gem_record_fences(dev, error);
-	i915_gem_record_rings(dev, error);
+	i915_gem_record_fences(dev_priv, error);
+	i915_gem_record_rings(dev_priv, error);
 
 	do_gettimeofday(&error->time);
 
-	error->overlay = intel_overlay_capture_error_state(dev);
-	error->display = intel_display_capture_error_state(dev);
+	error->overlay = intel_overlay_capture_error_state(dev_priv);
+	error->display = intel_display_capture_error_state(dev_priv);
 
-	i915_error_capture_msg(dev, error, engine_mask, error_msg);
+	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
 	DRM_INFO("%s\n", error->error_msg);
 
 	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
@@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
 		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
||||||
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
|
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
|
||||||
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
|
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
|
||||||
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
|
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
|
||||||
warned = true;
|
warned = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* NB: please notice the memset */
|
/* NB: please notice the memset */
|
||||||
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
|
void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
|
||||||
|
uint32_t *instdone)
|
||||||
{
|
{
|
||||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
||||||
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
|
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
|
||||||
|
|
||||||
if (IS_GEN2(dev) || IS_GEN3(dev))
|
if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
|
||||||
instdone[0] = I915_READ(GEN2_INSTDONE);
|
instdone[0] = I915_READ(GEN2_INSTDONE);
|
||||||
else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
|
else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
|
||||||
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
|
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
|
||||||
instdone[1] = I915_READ(GEN4_INSTDONE1);
|
instdone[1] = I915_READ(GEN4_INSTDONE1);
|
||||||
} else if (INTEL_INFO(dev)->gen >= 7) {
|
} else if (INTEL_GEN(dev_priv) >= 7) {
|
||||||
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
|
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
|
||||||
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
|
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
|
||||||
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
|
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
|
||||||
|
@@ -2537,15 +2537,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
  * Fire an error uevent so userspace can see that a hang or error
  * was detected.
  */
-static void i915_reset_and_wakeup(struct drm_device *dev)
+static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
 	int ret;

-	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
+	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

 	/*
 	 * Note that there's only one work item which does gpu resets, so we
@@ -2559,8 +2559,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 	 */
 	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
 		DRM_DEBUG_DRIVER("resetting chip\n");
-		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
-				   reset_event);
+		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

 		/*
 		 * In most cases it's guaranteed that we get here with an RPM
@@ -2571,7 +2570,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 		 */
 		intel_runtime_pm_get(dev_priv);

-		intel_prepare_reset(dev);
+		intel_prepare_reset(dev_priv);

 		/*
 		 * All state reset _must_ be completed before we update the
@@ -2579,14 +2578,14 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 		 * pending state and not properly drop locks, resulting in
 		 * deadlocks with the reset work.
 		 */
-		ret = i915_reset(dev);
+		ret = i915_reset(dev_priv);

-		intel_finish_reset(dev);
+		intel_finish_reset(dev_priv);

 		intel_runtime_pm_put(dev_priv);

 		if (ret == 0)
-			kobject_uevent_env(&dev->primary->kdev->kobj,
+			kobject_uevent_env(kobj,
 					   KOBJ_CHANGE, reset_done_event);

 		/*
@@ -2597,9 +2596,8 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 	}
 }

-static void i915_report_and_clear_eir(struct drm_device *dev)
+static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t instdone[I915_NUM_INSTDONE_REG];
 	u32 eir = I915_READ(EIR);
 	int pipe, i;
@@ -2609,9 +2607,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev)

 	pr_err("render error detected, EIR: 0x%08x\n", eir);

-	i915_get_extra_instdone(dev, instdone);
+	i915_get_extra_instdone(dev_priv, instdone);

-	if (IS_G4X(dev)) {
+	if (IS_G4X(dev_priv)) {
 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
 			u32 ipeir = I915_READ(IPEIR_I965);

@@ -2633,7 +2631,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 		}
 	}

-	if (!IS_GEN2(dev)) {
+	if (!IS_GEN2(dev_priv)) {
 		if (eir & I915_ERROR_PAGE_TABLE) {
 			u32 pgtbl_err = I915_READ(PGTBL_ER);
 			pr_err("page table error\n");
@@ -2655,7 +2653,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
-		if (INTEL_INFO(dev)->gen < 4) {
+		if (INTEL_GEN(dev_priv) < 4) {
 			u32 ipeir = I915_READ(IPEIR);

 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
@@ -2699,10 +2697,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
  */
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+		       u32 engine_mask,
 		       const char *fmt, ...)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	va_list args;
 	char error_msg[80];

@@ -2710,8 +2708,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
 	va_end(args);

-	i915_capture_error_state(dev, engine_mask, error_msg);
-	i915_report_and_clear_eir(dev);
+	i915_capture_error_state(dev_priv, engine_mask, error_msg);
+	i915_report_and_clear_eir(dev_priv);

 	if (engine_mask) {
 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
@@ -2733,7 +2731,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
 		i915_error_wake_up(dev_priv, false);
 	}

-	i915_reset_and_wakeup(dev);
+	i915_reset_and_wakeup(dev_priv);
 }

 /* Called from drm generic code, passed 'crtc' which
@@ -2851,9 +2849,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
 }

 static bool
-ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
 {
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		return (ipehr >> 23) == 0x1c;
 	} else {
 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2866,10 +2864,10 @@ static struct intel_engine_cs *
 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 				 u64 offset)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_engine_cs *signaller;

-	if (INTEL_INFO(dev_priv)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		for_each_engine(signaller, dev_priv) {
 			if (engine == signaller)
 				continue;
@@ -2898,7 +2896,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 static struct intel_engine_cs *
 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	u32 cmd, ipehr, head;
 	u64 offset = 0;
 	int i, backwards;
@@ -2924,7 +2922,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 		return NULL;

 	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-	if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
+	if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
 		return NULL;

 	/*
@@ -2936,7 +2934,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 	 * ringbuffer itself.
 	 */
 	head = I915_READ_HEAD(engine) & HEAD_ADDR;
-	backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;

 	for (i = backwards; i; --i) {
 		/*
@@ -2958,7 +2956,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 		return NULL;

 	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
-	if (INTEL_INFO(engine->dev)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		offset = ioread32(engine->buffer->virtual_start + head + 12);
 		offset <<= 32;
 		offset |= ioread32(engine->buffer->virtual_start + head + 8);
@@ -2968,7 +2966,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)

 static int semaphore_passed(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_engine_cs *signaller;
 	u32 seqno;

@@ -3010,7 +3008,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
 	if (engine->id != RCS)
 		return true;

-	i915_get_extra_instdone(engine->dev, instdone);
+	i915_get_extra_instdone(engine->i915, instdone);

 	/* There might be unstable subunit states even when
 	 * actual head is not moving. Filter out the unstable ones by
@@ -3051,8 +3049,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
 static enum intel_ring_hangcheck_action
 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 	enum intel_ring_hangcheck_action ha;
 	u32 tmp;

@@ -3060,7 +3057,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 	if (ha != HANGCHECK_HUNG)
 		return ha;

-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return HANGCHECK_HUNG;

 	/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3070,19 +3067,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 	 */
 	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev, 0,
+		i915_handle_error(dev_priv, 0,
 				  "Kicking stuck wait on %s",
 				  engine->name);
 		I915_WRITE_CTL(engine, tmp);
 		return HANGCHECK_KICK;
 	}

-	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
 		switch (semaphore_passed(engine)) {
 		default:
 			return HANGCHECK_HUNG;
 		case 1:
-			i915_handle_error(dev, 0,
+			i915_handle_error(dev_priv, 0,
 					  "Kicking stuck semaphore on %s",
 					  engine->name);
 			I915_WRITE_CTL(engine, tmp);
@@ -3097,7 +3094,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)

 static unsigned kick_waiters(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *i915 = to_i915(engine->dev);
+	struct drm_i915_private *i915 = engine->i915;
 	unsigned user_interrupts = READ_ONCE(engine->user_interrupts);

 	if (engine->hangcheck.user_interrupts == user_interrupts &&
@@ -3126,7 +3123,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv),
 			     gpu_error.hangcheck_work.work);
-	struct drm_device *dev = dev_priv->dev;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int busy_count = 0, rings_hung = 0;
@@ -3254,22 +3250,22 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	}

 	if (rings_hung) {
-		i915_handle_error(dev, rings_hung, "Engine(s) hung");
+		i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
 		goto out;
 	}

 	if (busy_count)
 		/* Reset timer case chip hangs without another request
 		 * being added */
-		i915_queue_hangcheck(dev);
+		i915_queue_hangcheck(dev_priv);

out:
 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 }

-void i915_queue_hangcheck(struct drm_device *dev)
+void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
 {
-	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
+	struct i915_gpu_error *e = &dev_priv->gpu_error;

 	if (!i915.enable_hangcheck)
 		return;
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 		      ),

 	    TP_fast_assign(
-			   __entry->dev = from->dev->primary->index;
+			   __entry->dev = from->i915->dev->primary->index;
 			   __entry->sync_from = from->id;
 			   __entry->sync_to = to_req->engine->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 		      ),

 	    TP_fast_assign(
-			   struct intel_engine_cs *engine =
-					i915_gem_request_get_engine(req);
-			   __entry->dev = engine->dev->primary->index;
-			   __entry->ring = engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   __entry->flags = flags;
-			   i915_trace_irq_get(engine, req);
+			   i915_trace_irq_get(req->engine, req);
 			   ),

 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
 		      ),

 	    TP_fast_assign(
-			   __entry->dev = req->engine->dev->primary->index;
+			   __entry->dev = req->i915->dev->primary->index;
 			   __entry->ring = req->engine->id;
 			   __entry->invalidate = invalidate;
 			   __entry->flush = flush;
@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 		      ),

 	    TP_fast_assign(
-			   struct intel_engine_cs *engine =
-					i915_gem_request_get_engine(req);
-			   __entry->dev = engine->dev->primary->index;
-			   __entry->ring = engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   ),

 	    TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify,
 		      ),

 	    TP_fast_assign(
-			   __entry->dev = engine->dev->primary->index;
+			   __entry->dev = engine->i915->dev->primary->index;
 			   __entry->ring = engine->id;
 			   __entry->seqno = engine->get_seqno(engine);
 			   ),
@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	     * less desirable.
 	     */
 	    TP_fast_assign(
-			   struct intel_engine_cs *engine =
-					i915_gem_request_get_engine(req);
-			   __entry->dev = engine->dev->primary->index;
-			   __entry->ring = engine->id;
-			   __entry->seqno = i915_gem_request_get_seqno(req);
+			   __entry->dev = req->i915->dev->primary->index;
+			   __entry->ring = req->engine->id;
+			   __entry->seqno = req->seqno;
 			   __entry->blocking =
-				     mutex_is_locked(&engine->dev->struct_mutex);
+				     mutex_is_locked(&req->i915->dev->struct_mutex);
 			   ),

 	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
 			   __entry->ring = engine->id;
 			   __entry->to = to;
 			   __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-			   __entry->dev = engine->dev->primary->index;
+			   __entry->dev = engine->i915->dev->primary->index;
 		      ),

 	    TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
@@ -3144,28 +3144,26 @@ static void intel_update_primary_planes(struct drm_device *dev)
 	}
 }

-void intel_prepare_reset(struct drm_device *dev)
+void intel_prepare_reset(struct drm_i915_private *dev_priv)
 {
 	/* no reset support for gen2 */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return;

 	/* reset doesn't touch the display */
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		return;

-	drm_modeset_lock_all(dev);
+	drm_modeset_lock_all(dev_priv->dev);
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
 	 */
-	intel_display_suspend(dev);
+	intel_display_suspend(dev_priv->dev);
 }

-void intel_finish_reset(struct drm_device *dev)
+void intel_finish_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/*
 	 * Flips in the rings will be nuked by the reset,
 	 * so complete all pending flips so that user space
@@ -3174,11 +3172,11 @@ void intel_finish_reset(struct drm_device *dev)
 	intel_complete_page_flips(dev_priv);

 	/* no reset support for gen2 */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		return;

 	/* reset doesn't touch the display */
-	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
 		/*
 		 * Flips in the rings have been nuked by the reset,
 		 * so update the base address of all primary
@@ -3188,7 +3186,7 @@ void intel_finish_reset(struct drm_device *dev)
 		 * FIXME: Atomic will make this obsolete since we won't schedule
 		 * CS-based flips (which might get lost in gpu resets) any more.
 		 */
-		intel_update_primary_planes(dev);
+		intel_update_primary_planes(dev_priv->dev);
 		return;
 	}

@@ -3199,18 +3197,18 @@ void intel_finish_reset(struct drm_device *dev)
 	intel_runtime_pm_disable_interrupts(dev_priv);
 	intel_runtime_pm_enable_interrupts(dev_priv);

-	intel_modeset_init_hw(dev);
+	intel_modeset_init_hw(dev_priv->dev);

 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->display.hpd_irq_setup)
 		dev_priv->display.hpd_irq_setup(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);

-	intel_display_resume(dev);
+	intel_display_resume(dev_priv->dev);

 	intel_hpd_init(dev_priv);

-	drm_modeset_unlock_all(dev);
+	drm_modeset_unlock_all(dev_priv->dev);
 }

 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -11255,7 +11253,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
 	if (engine == NULL)
 		return true;

-	if (INTEL_INFO(engine->dev)->gen < 5)
+	if (INTEL_GEN(engine->i915) < 5)
 		return false;

 	if (i915.use_mmio_flip < 0)
@@ -16187,9 +16185,8 @@ struct intel_display_error_state {
 };

 struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_device *dev)
+intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_display_error_state *error;
 	int transcoders[] = {
 		TRANSCODER_A,
@@ -16199,14 +16196,14 @@ intel_display_capture_error_state(struct drm_device *dev)
 	};
 	int i;

-	if (INTEL_INFO(dev)->num_pipes == 0)
+	if (INTEL_INFO(dev_priv)->num_pipes == 0)
 		return NULL;

 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
 	if (error == NULL)
 		return NULL;

-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

 	for_each_pipe(dev_priv, i) {
@@ -16222,25 +16219,25 @@ intel_display_capture_error_state(struct drm_device *dev)

 		error->plane[i].control = I915_READ(DSPCNTR(i));
 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
-		if (INTEL_INFO(dev)->gen <= 3) {
+		if (INTEL_GEN(dev_priv) <= 3) {
 			error->plane[i].size = I915_READ(DSPSIZE(i));
 			error->plane[i].pos = I915_READ(DSPPOS(i));
 		}
-		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
 			error->plane[i].addr = I915_READ(DSPADDR(i));
-		if (INTEL_INFO(dev)->gen >= 4) {
+		if (INTEL_GEN(dev_priv) >= 4) {
 			error->plane[i].surface = I915_READ(DSPSURF(i));
 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
 		}

 		error->pipe[i].source = I915_READ(PIPESRC(i));

-		if (HAS_GMCH_DISPLAY(dev))
+		if (HAS_GMCH_DISPLAY(dev_priv))
 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
 	}

 	/* Note: this does not include DSI transcoders. */
-	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
 	if (HAS_DDI(dev_priv))
 		error->num_transcoders++; /* Account for eDP. */

@@ -1231,8 +1231,8 @@ u32 intel_compute_tile_offset(int *x, int *y,
 			      const struct drm_framebuffer *fb, int plane,
 			      unsigned int pitch,
 			      unsigned int rotation);
-void intel_prepare_reset(struct drm_device *dev);
-void intel_finish_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void broxton_init_cdclk(struct drm_i915_private *dev_priv);
@@ -827,7 +827,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
 	bool enable_by_default = IS_HASWELL(dev_priv) ||
 				 IS_BROADWELL(dev_priv);

-	if (intel_vgpu_active(dev_priv->dev)) {
+	if (intel_vgpu_active(dev_priv)) {
 		fbc->no_fbc_reason = "VGPU is active";
 		return false;
 	}
@ -246,21 +246,22 @@ static int intel_lr_context_pin(struct intel_context *ctx,
|
|||||||
*
|
*
|
||||||
* Return: 1 if Execlists is supported and has to be enabled.
|
* Return: 1 if Execlists is supported and has to be enabled.
|
||||||
*/
|
*/
|
||||||
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
|
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
|
||||||
{
|
{
|
||||||
/* On platforms with execlist available, vGPU will only
|
/* On platforms with execlist available, vGPU will only
|
||||||
* support execlist mode, no ring buffer mode.
|
* support execlist mode, no ring buffer mode.
|
||||||
*/
|
*/
|
||||||
if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
|
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
if (INTEL_INFO(dev)->gen >= 9)
|
if (INTEL_GEN(dev_priv) >= 9)
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
if (enable_execlists == 0)
|
if (enable_execlists == 0)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
|
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
|
||||||
|
USES_PPGTT(dev_priv) &&
|
||||||
i915.use_mmio_flip >= 0)
|
i915.use_mmio_flip >= 0)
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
@ -270,19 +271,19 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
|
|||||||
static void
|
static void
|
||||||
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
|
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = engine->dev;
|
struct drm_i915_private *dev_priv = engine->i915;
|
||||||
|
|
||||||
if (IS_GEN8(dev) || IS_GEN9(dev))
|
if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
|
||||||
engine->idle_lite_restore_wa = ~0;
|
engine->idle_lite_restore_wa = ~0;
|
||||||
|
|
||||||
engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
|
engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
|
||||||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
|
IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
|
||||||
(engine->id == VCS || engine->id == VCS2);
|
(engine->id == VCS || engine->id == VCS2);
|
||||||
|
|
||||||
engine->ctx_desc_template = GEN8_CTX_VALID;
|
engine->ctx_desc_template = GEN8_CTX_VALID;
|
||||||
engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
|
engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
|
||||||
GEN8_CTX_ADDRESSING_MODE_SHIFT;
|
GEN8_CTX_ADDRESSING_MODE_SHIFT;
|
||||||
if (IS_GEN8(dev))
|
if (IS_GEN8(dev_priv))
|
||||||
engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
|
engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
|
||||||
engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
|
engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
|
||||||
|
|
||||||
@ -342,8 +343,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
|
|||||||
{
|
{
|
||||||
|
|
||||||
struct intel_engine_cs *engine = rq0->engine;
|
struct intel_engine_cs *engine = rq0->engine;
|
||||||
struct drm_device *dev = engine->dev;
|
struct drm_i915_private *dev_priv = rq0->i915;
|
||||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
||||||
uint64_t desc[2];
|
uint64_t desc[2];
|
||||||
|
|
||||||
if (rq1) {
|
if (rq1) {
|
||||||
@ -425,7 +425,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
|
|||||||
* If irqs are not active generate a warning as batches that finish
|
* If irqs are not active generate a warning as batches that finish
|
||||||
* without the irqs may get lost and a GPU Hang may occur.
|
* without the irqs may get lost and a GPU Hang may occur.
|
||||||
*/
|
*/
|
||||||
WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
|
WARN_ON(!intel_irqs_enabled(engine->i915));
|
||||||
|
|
||||||
/* Try to read in pairs */
|
/* Try to read in pairs */
|
||||||
list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
|
list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
|
||||||
@ -497,7 +497,7 @@ static u32
|
|||||||
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
|
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
|
||||||
u32 *context_id)
|
u32 *context_id)
|
||||||
{
|
{
|
||||||
struct drm_i915_private *dev_priv = engine->dev->dev_private;
|
struct drm_i915_private *dev_priv = engine->i915;
|
||||||
u32 status;
|
u32 status;
|
||||||
|
|
||||||
read_pointer %= GEN8_CSB_ENTRIES;
|
read_pointer %= GEN8_CSB_ENTRIES;
|
||||||
@ -523,7 +523,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
|
|||||||
static void intel_lrc_irq_handler(unsigned long data)
|
static void intel_lrc_irq_handler(unsigned long data)
|
||||||
{
|
{
|
||||||
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
|
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
|
||||||
struct drm_i915_private *dev_priv = engine->dev->dev_private;
|
struct drm_i915_private *dev_priv = engine->i915;
|
||||||
u32 status_pointer;
|
u32 status_pointer;
|
||||||
unsigned int read_pointer, write_pointer;
|
unsigned int read_pointer, write_pointer;
|
||||||
u32 csb[GEN8_CSB_ENTRIES][2];
|
u32 csb[GEN8_CSB_ENTRIES][2];
|
||||||
@ -884,7 +884,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
|
|||||||
struct drm_i915_gem_request *req, *tmp;
|
struct drm_i915_gem_request *req, *tmp;
|
||||||
LIST_HEAD(cancel_list);
|
LIST_HEAD(cancel_list);
|
||||||
|
|
||||||
WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
|
WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
|
||||||
|
|
||||||
spin_lock_bh(&engine->execlist_lock);
|
spin_lock_bh(&engine->execlist_lock);
|
||||||
list_replace_init(&engine->execlist_queue, &cancel_list);
|
list_replace_init(&engine->execlist_queue, &cancel_list);
|
||||||
@ -898,7 +898,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
|
|||||||
|
|
||||||
void intel_logical_ring_stop(struct intel_engine_cs *engine)
|
void intel_logical_ring_stop(struct intel_engine_cs *engine)
|
||||||
{
|
{
|
||||||
struct drm_i915_private *dev_priv = engine->dev->dev_private;
|
struct drm_i915_private *dev_priv = engine->i915;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (!intel_engine_initialized(engine))
|
if (!intel_engine_initialized(engine))
|
||||||
@ -964,7 +964,7 @@ static int intel_lr_context_pin(struct intel_context *ctx,
|
|||||||
lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
|
lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
|
||||||
|
|
||||||
ringbuf = ctx->engine[engine->id].ringbuf;
|
ringbuf = ctx->engine[engine->id].ringbuf;
|
||||||
ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
|
ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto unpin_map;
|
goto unpin_map;
|
||||||
|
|
||||||
@ -1019,9 +1019,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
|
|||||||
int ret, i;
|
int ret, i;
|
||||||
struct intel_engine_cs *engine = req->engine;
|
struct intel_engine_cs *engine = req->engine;
|
||||||
struct intel_ringbuffer *ringbuf = req->ringbuf;
|
struct intel_ringbuffer *ringbuf = req->ringbuf;
|
||||||
struct drm_device *dev = engine->dev;
|
struct i915_workarounds *w = &req->i915->workarounds;
|
||||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
||||||
struct i915_workarounds *w = &dev_priv->workarounds;
|
|
||||||
|
|
||||||
if (w->count == 0)
|
if (w->count == 0)
|
||||||
return 0;
|
return 0;
|
||||||
@ -1092,7 +1090,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
|
|||||||
* this batch updates GEN8_L3SQCREG4 with default value we need to
|
* this batch updates GEN8_L3SQCREG4 with default value we need to
|
||||||
* set this bit here to retain the WA during flush.
|
* set this bit here to retain the WA during flush.
|
||||||
*/
|
*/
|
||||||
if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
|
if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0))
|
||||||
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
|
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
|
||||||
|
|
||||||
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
|
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
|
||||||
@ -1181,7 +1179,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
|
|||||||
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
|
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
|
||||||
|
|
||||||
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
|
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
|
||||||
if (IS_BROADWELL(engine->dev)) {
|
if (IS_BROADWELL(engine->i915)) {
|
||||||
int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
|
int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
|
||||||
if (rc < 0)
|
if (rc < 0)
|
||||||
return rc;
|
return rc;
|
||||||
@ -1253,12 +1251,11 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
|
|||||||
uint32_t *offset)
|
uint32_t *offset)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
struct drm_device *dev = engine->dev;
|
|
||||||
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
|
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
|
||||||
|
|
||||||
/* WaDisableCtxRestoreArbitration:skl,bxt */
|
/* WaDisableCtxRestoreArbitration:skl,bxt */
|
||||||
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
|
if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
|
||||||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
|
IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
|
||||||
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
|
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
|
||||||
|
|
||||||
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
|
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
|
||||||
@ -1279,12 +1276,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
|
|||||||
uint32_t *const batch,
|
uint32_t *const batch,
|
||||||
uint32_t *offset)
|
uint32_t *offset)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = engine->dev;
|
|
||||||
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
|
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
|
||||||
|
|
||||||
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
|
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
|
||||||
if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
|
if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
|
||||||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
|
IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
|
||||||
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
|
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
|
||||||
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
|
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
|
||||||
wa_ctx_emit(batch, index,
|
wa_ctx_emit(batch, index,
|
||||||
@ -1293,7 +1289,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* WaClearTdlStateAckDirtyBits:bxt */
|
/* WaClearTdlStateAckDirtyBits:bxt */
|
||||||
if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
|
if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
|
||||||
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
|
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
|
||||||
|
|
||||||
wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
|
wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
|
||||||
@ -1312,8 +1308,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* WaDisableCtxRestoreArbitration:skl,bxt */
|
/* WaDisableCtxRestoreArbitration:skl,bxt */
|
||||||
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
|
if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
|
||||||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
|
IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
|
||||||
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
|
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
|
||||||
|
|
||||||
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
|
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
|
||||||
@ -1325,7 +1321,7 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
|
|||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
engine->wa_ctx.obj = i915_gem_object_create(engine->dev,
|
engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
|
||||||
PAGE_ALIGN(size));
|
PAGE_ALIGN(size));
|
||||||
if (IS_ERR(engine->wa_ctx.obj)) {
|
if (IS_ERR(engine->wa_ctx.obj)) {
|
||||||
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
|
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
|
||||||
@ -1365,9 +1361,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
|
|||||||
WARN_ON(engine->id != RCS);
|
WARN_ON(engine->id != RCS);
|
||||||
|
|
||||||
/* update this when WA for higher Gen are added */
|
/* update this when WA for higher Gen are added */
|
||||||
if (INTEL_INFO(engine->dev)->gen > 9) {
|
if (INTEL_GEN(engine->i915) > 9) {
|
||||||
DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
|
DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
|
||||||
INTEL_INFO(engine->dev)->gen);
|
INTEL_GEN(engine->i915));
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1387,7 +1383,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
|
|||||||
batch = kmap_atomic(page);
|
batch = kmap_atomic(page);
|
||||||
offset = 0;
|
offset = 0;
|
||||||
|
|
||||||
if (INTEL_INFO(engine->dev)->gen == 8) {
|
if (IS_GEN8(engine->i915)) {
|
||||||
ret = gen8_init_indirectctx_bb(engine,
|
ret = gen8_init_indirectctx_bb(engine,
|
||||||
&wa_ctx->indirect_ctx,
|
&wa_ctx->indirect_ctx,
|
||||||
batch,
|
batch,
|
||||||
@ -1401,7 +1397,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
|
|||||||
&offset);
|
&offset);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
} else if (INTEL_INFO(engine->dev)->gen == 9) {
|
} else if (IS_GEN9(engine->i915)) {
|
||||||
ret = gen9_init_indirectctx_bb(engine,
|
ret = gen9_init_indirectctx_bb(engine,
|
||||||
&wa_ctx->indirect_ctx,
|
&wa_ctx->indirect_ctx,
|
||||||
batch,
|
batch,
|
||||||
@ -1427,7 +1423,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
|
|||||||
|
|
||||||
static void lrc_init_hws(struct intel_engine_cs *engine)
|
static void lrc_init_hws(struct intel_engine_cs *engine)
|
||||||
{
|
{
|
||||||
struct drm_i915_private *dev_priv = engine->dev->dev_private;
|
struct drm_i915_private *dev_priv = engine->i915;
|
||||||
|
|
||||||
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
|
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
|
||||||
(u32)engine->status_page.gfx_addr);
|
(u32)engine->status_page.gfx_addr);
|
||||||
@ -1436,8 +1432,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
|
|||||||
|
|
||||||
static int gen8_init_common_ring(struct intel_engine_cs *engine)
|
static int gen8_init_common_ring(struct intel_engine_cs *engine)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = engine->dev;
|
struct drm_i915_private *dev_priv = engine->i915;
|
||||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
||||||
unsigned int next_context_status_buffer_hw;
|
unsigned int next_context_status_buffer_hw;
|
||||||
|
|
||||||
lrc_init_hws(engine);
|
lrc_init_hws(engine);
|
||||||
@ -1484,8 +1479,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
|
|||||||
|
|
||||||
static int gen8_init_render_ring(struct intel_engine_cs *engine)
|
static int gen8_init_render_ring(struct intel_engine_cs *engine)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = engine->dev;
|
struct drm_i915_private *dev_priv = engine->i915;
|
||||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = gen8_init_common_ring(engine);
|
ret = gen8_init_common_ring(engine);
|
||||||
@ -1562,7 +1556,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
|
|||||||
if (req->ctx->ppgtt &&
|
if (req->ctx->ppgtt &&
|
||||||
(intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
|
(intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
|
||||||
if (!USES_FULL_48BIT_PPGTT(req->i915) &&
|
if (!USES_FULL_48BIT_PPGTT(req->i915) &&
|
||||||
!intel_vgpu_active(req->i915->dev)) {
|
!intel_vgpu_active(req->i915)) {
|
||||||
ret = intel_logical_ring_emit_pdps(req);
|
ret = intel_logical_ring_emit_pdps(req);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
@ -1590,8 +1584,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
|
|||||||
|
|
||||||
static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
|
static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = engine->dev;
|
struct drm_i915_private *dev_priv = engine->i915;
|
||||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
|
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
|
||||||
@ -1610,8 +1603,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
|
|||||||
|
|
||||||
static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
|
static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
|
||||||
{
|
{
|
||||||
struct drm_device *dev = engine->dev;
|
struct drm_i915_private *dev_priv = engine->i915;
|
||||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||||
@ -1628,8 +1620,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
|
|||||||
{
|
{
|
||||||
struct intel_ringbuffer *ringbuf = request->ringbuf;
|
struct intel_ringbuffer *ringbuf = request->ringbuf;
|
||||||
struct intel_engine_cs *engine = ringbuf->engine;
|
struct intel_engine_cs *engine = ringbuf->engine;
|
||||||
struct drm_device *dev = engine->dev;
|
struct drm_i915_private *dev_priv = request->i915;
|
||||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
|
||||||
uint32_t cmd;
|
uint32_t cmd;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
@ -1697,7 +1688,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
|
|||||||
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
|
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
|
||||||
* pipe control.
|
* pipe control.
|
||||||
*/
|
*/
|
||||||
if (IS_GEN9(engine->dev))
|
if (IS_GEN9(request->i915))
|
||||||
vf_flush_wa = true;
|
vf_flush_wa = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1890,7 +1881,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
|
|||||||
if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
|
if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
|
||||||
tasklet_kill(&engine->irq_tasklet);
|
tasklet_kill(&engine->irq_tasklet);
|
||||||
|
|
||||||
dev_priv = engine->dev->dev_private;
|
dev_priv = engine->i915;
|
||||||
|
|
||||||
if (engine->buffer) {
|
if (engine->buffer) {
|
||||||
intel_logical_ring_stop(engine);
|
intel_logical_ring_stop(engine);
|
||||||
@ -1914,7 +1905,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
|
|||||||
engine->ctx_desc_template = 0;
|
engine->ctx_desc_template = 0;
|
||||||
|
|
||||||
lrc_destroy_wa_ctx_obj(engine);
|
lrc_destroy_wa_ctx_obj(engine);
|
||||||
engine->dev = NULL;
|
engine->i915 = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
@ -1929,7 +1920,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
|
|||||||
engine->emit_bb_start = gen8_emit_bb_start;
|
engine->emit_bb_start = gen8_emit_bb_start;
|
||||||
engine->get_seqno = gen8_get_seqno;
|
engine->get_seqno = gen8_get_seqno;
|
||||||
engine->set_seqno = gen8_set_seqno;
|
engine->set_seqno = gen8_set_seqno;
|
||||||
if (IS_BXT_REVID(engine->dev, 0, BXT_REVID_A1)) {
|
if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
|
||||||
engine->irq_seqno_barrier = bxt_a_seqno_barrier;
|
engine->irq_seqno_barrier = bxt_a_seqno_barrier;
|
||||||
engine->set_seqno = bxt_a_set_seqno;
|
engine->set_seqno = bxt_a_set_seqno;
|
||||||
}
|
}
|
||||||
@ -2019,7 +2010,7 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
|
|||||||
engine->guc_id = info->guc_id;
|
engine->guc_id = info->guc_id;
|
||||||
engine->mmio_base = info->mmio_base;
|
engine->mmio_base = info->mmio_base;
|
||||||
|
|
||||||
engine->dev = dev;
|
engine->i915 = dev_priv;
|
||||||
|
|
||||||
/* Intentionally left blank. */
|
/* Intentionally left blank. */
|
||||||
engine->buffer = NULL;
|
engine->buffer = NULL;
|
||||||
@ -2052,7 +2043,7 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
|
|||||||
logical_ring_default_irqs(engine, info->irq_shift);
|
logical_ring_default_irqs(engine, info->irq_shift);
|
||||||
|
|
||||||
intel_engine_init_hangcheck(engine);
|
intel_engine_init_hangcheck(engine);
|
||||||
i915_gem_batch_pool_init(engine->dev, &engine->batch_pool);
|
i915_gem_batch_pool_init(dev, &engine->batch_pool);
|
||||||
|
|
||||||
return engine;
|
return engine;
|
||||||
}
|
}
|
||||||
@@ -2060,7 +2051,7 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
 static int
 logical_ring_init(struct intel_engine_cs *engine)
 {
-	struct intel_context *dctx = to_i915(engine->dev)->kernel_context;
+	struct intel_context *dctx = engine->i915->kernel_context;
 	int ret;
 
 	ret = i915_cmd_parser_init_ring(engine);
@@ -2220,7 +2211,7 @@ int intel_logical_rings_init(struct drm_device *dev)
 }
 
 static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct drm_i915_private *dev_priv)
 {
 	u32 rpcs = 0;
 
@@ -2228,7 +2219,7 @@ make_rpcs(struct drm_device *dev)
 	 * No explicit RPCS request is needed to ensure full
 	 * slice/subslice/EU enablement prior to Gen9.
 	 */
-	if (INTEL_INFO(dev)->gen < 9)
+	if (INTEL_GEN(dev_priv) < 9)
 		return 0;
 
 	/*
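The INTEL_INFO(dev)->gen to INTEL_GEN(dev_priv) conversions here and in the hunks below assume INTEL_GEN() is a thin accessor over the device-info block. A compilable sketch under that assumption — the types and macros are stand-ins, and the real i915_drv.h macros also accept a drm_device pointer via compile-time type dispatch:

    #include <stdio.h>

    struct intel_device_info { int gen; };
    struct drm_i915_private { struct intel_device_info info; };

    /* Reduced stand-ins for the i915_drv.h helpers (dev_priv case only). */
    #define INTEL_INFO(p)   (&(p)->info)
    #define INTEL_GEN(p)    (INTEL_INFO(p)->gen)

    int main(void)
    {
            struct drm_i915_private dev_priv = { .info = { .gen = 9 } };

            /* The make_rpcs() early-out above becomes this comparison: */
            if (INTEL_GEN(&dev_priv) < 9)
                    puts("pre-gen9: no explicit RPCS request needed");
            else
                    puts("gen9+: build an explicit RPCS request");
            return 0;
    }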
@@ -2237,24 +2228,24 @@ make_rpcs(struct drm_device *dev)
 	 * must make an explicit request through RPCS for full
 	 * enablement.
 	 */
-	if (INTEL_INFO(dev)->has_slice_pg) {
+	if (INTEL_INFO(dev_priv)->has_slice_pg) {
 		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev)->slice_total <<
+		rpcs |= INTEL_INFO(dev_priv)->slice_total <<
 			GEN8_RPCS_S_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev)->has_subslice_pg) {
+	if (INTEL_INFO(dev_priv)->has_subslice_pg) {
 		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+		rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
 			GEN8_RPCS_SS_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev)->has_eu_pg) {
-		rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+	if (INTEL_INFO(dev_priv)->has_eu_pg) {
+		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
 			GEN8_RPCS_EU_MIN_SHIFT;
-		rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
 			GEN8_RPCS_EU_MAX_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
@@ -2266,9 +2257,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 {
 	u32 indirect_ctx_offset;
 
-	switch (INTEL_INFO(engine->dev)->gen) {
+	switch (INTEL_GEN(engine->i915)) {
 	default:
-		MISSING_CASE(INTEL_INFO(engine->dev)->gen);
+		MISSING_CASE(INTEL_GEN(engine->i915));
 		/* fall through */
 	case 9:
 		indirect_ctx_offset =
@@ -2289,8 +2280,7 @@ populate_lr_context(struct intel_context *ctx,
 		    struct intel_engine_cs *engine,
 		    struct intel_ringbuffer *ringbuf)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = ctx->i915;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 	void *vaddr;
 	u32 *reg_state;
@@ -2328,7 +2318,7 @@ populate_lr_context(struct intel_context *ctx,
 		       RING_CONTEXT_CONTROL(engine),
 		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
 					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-					  (HAS_RESOURCE_STREAMER(dev) ?
+					  (HAS_RESOURCE_STREAMER(dev_priv) ?
 					   CTX_CTRL_RS_CTX_ENABLE : 0)));
 	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
 		       0);
@@ -2417,7 +2407,7 @@ populate_lr_context(struct intel_context *ctx,
 	if (engine->id == RCS) {
 		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-			       make_rpcs(dev));
+			       make_rpcs(dev_priv));
 	}
 
 	i915_gem_object_unpin_map(ctx_obj);
@@ -2468,11 +2458,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 {
 	int ret = 0;
 
-	WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
+	WARN_ON(INTEL_GEN(engine->i915) < 8);
 
 	switch (engine->id) {
 	case RCS:
-		if (INTEL_INFO(engine->dev)->gen >= 9)
+		if (INTEL_GEN(engine->i915) >= 9)
 			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
 		else
 			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2504,7 +2494,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 static int execlists_context_deferred_alloc(struct intel_context *ctx,
 					    struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
@@ -2518,7 +2507,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ctx,
 	/* One extra page as the sharing data between driver and GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
-	ctx_obj = i915_gem_object_create(dev, context_size);
+	ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
 	if (IS_ERR(ctx_obj)) {
 		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
 		return PTR_ERR(ctx_obj);
@@ -112,7 +112,8 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 				     struct intel_engine_cs *engine);
 
 /* Execlists */
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
+				    int enable_execlists);
 struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
@@ -189,7 +189,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
  */
 int intel_mocs_init_engine(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_mocs_table table;
 	unsigned int index;
 
@@ -1508,9 +1508,8 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
 
 
 struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_device *dev)
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay = dev_priv->overlay;
 	struct intel_overlay_error_state *error;
 	struct overlay_registers __iomem *regs;
@@ -6344,7 +6344,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* Powersaving is controlled by the host when inside a VM */
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv))
 		return;
 
 	if (IS_IRONLAKE_M(dev)) {
@@ -7400,8 +7400,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 	struct drm_i915_gem_request *req = boost->req;
 
 	if (!i915_gem_request_completed(req, true))
-		gen6_rps_boost(to_i915(req->engine->dev), NULL,
-			       req->emitted_jiffies);
+		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
 	i915_gem_request_unreference(req);
 	kfree(boost);
File diff suppressed because it is too large
@@ -141,7 +141,8 @@ struct i915_ctx_workarounds {
 	struct drm_i915_gem_object *obj;
 };
 
 struct intel_engine_cs {
+	struct drm_i915_private *i915;
 	const char *name;
 	enum intel_engine_id {
 		RCS = 0,
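The i915 field added here is what every converted call site above dereferences, and it cooperates with the driver's feature macros: INTEL_INFO() and the IS_*() checks funnel through a __I915__() dispatcher that accepts either pointer type. Paraphrased from the i915_drv.h of this era (not verbatim, quoted from memory):

    #define __I915__(p) ({ \
            struct drm_i915_private *__p; \
            if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
                    __p = (struct drm_i915_private *)p; \
            else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
                    __p = to_i915((struct drm_device *)p); \
            else \
                    BUILD_BUG(); \
            __p; \
    })
    #define INTEL_INFO(p)   (&__I915__(p)->info)

With a drm_i915_private argument the dispatcher reduces to a plain cast; the drm_device branch pays the extra to_i915() load, which is why threading dev_priv (or engine->i915) through directly yields smaller code.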
@@ -156,7 +157,6 @@ struct intel_engine_cs {
 	unsigned int hw_id;
 	unsigned int guc_id; /* XXX same as hw_id? */
 	u32 mmio_base;
-	struct drm_device *dev;
 	struct intel_ringbuffer *buffer;
 	struct list_head buffers;
 
@@ -350,7 +350,7 @@ struct intel_engine_cs {
 static inline bool
 intel_engine_initialized(struct intel_engine_cs *engine)
 {
-	return engine->dev != NULL;
+	return engine->i915 != NULL;
 }
 
 static inline unsigned
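With engine->dev gone, "is this engine initialized" reduces to "does it carry an i915 backpointer", which pairs with logical_ring_setup() storing the pointer and intel_logical_ring_cleanup() clearing it. A minimal lifecycle sketch (toy code, not the driver):

    #include <assert.h>
    #include <stddef.h>

    struct drm_i915_private { int unused; };
    struct intel_engine_cs { struct drm_i915_private *i915; };

    static int intel_engine_initialized(const struct intel_engine_cs *engine)
    {
            return engine->i915 != NULL;
    }

    int main(void)
    {
            struct drm_i915_private i915 = { 0 };
            struct intel_engine_cs engine = { .i915 = NULL };

            assert(!intel_engine_initialized(&engine));
            engine.i915 = &i915;    /* setup stores the backpointer */
            assert(intel_engine_initialized(&engine));
            engine.i915 = NULL;     /* cleanup clears it */
            assert(!intel_engine_initialized(&engine));
            return 0;
    }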
@@ -425,7 +425,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
 
 struct intel_ringbuffer *
 intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
 				     struct intel_ringbuffer *ringbuf);
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
@@ -1381,7 +1381,7 @@ void intel_uncore_init(struct drm_device *dev)
 		break;
 	}
 
-	if (intel_vgpu_active(dev)) {
+	if (intel_vgpu_active(dev_priv)) {
 		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
 		ASSIGN_READ_MMIO_VFUNCS(vgpu);
 	}
@@ -1663,8 +1663,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv,
 
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = engine->i915;
 	int ret;
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
@@ -1682,7 +1682,7 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 
 static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = engine->i915;
 
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
@@ -1802,10 +1802,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
 {
 	enum forcewake_domains fw_domains;
 
-	if (intel_vgpu_active(dev_priv->dev))
+	if (intel_vgpu_active(dev_priv))
 		return 0;
 
-	switch (INTEL_INFO(dev_priv)->gen) {
+	switch (INTEL_GEN(dev_priv)) {
 	case 9:
 		fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
 		break;
@@ -1842,10 +1842,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 {
 	enum forcewake_domains fw_domains;
 
-	if (intel_vgpu_active(dev_priv->dev))
+	if (intel_vgpu_active(dev_priv))
 		return 0;
 
-	switch (INTEL_INFO(dev_priv)->gen) {
+	switch (INTEL_GEN(dev_priv)) {
 	case 9:
 		fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
 		break;
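intel_vgpu_active() likewise switches from a drm_device to a drm_i915_private argument across these call sites. Its body is roughly the following (paraphrased from the i915 headers of this era, not verbatim):

    static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
    {
            return dev_priv->vgpu.active;
    }

Taking dev_priv keeps the check a single load and matches the callers, which already hold dev_priv in hand.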