drm fixes for 5.4

amdgpu:
 - Remove experimental flag for navi14
 - Fix confusing power message failures on older VI parts
 - Fix a hang with gfxoff when using the register read interface
 - Two stability regression fixes for Raven
 
 i915:
 - Fix kernel oops in the dumb_create ioctl when there are no CRTCs
 - Fix ugly colored flash on VLV/CHV related to gamma LUT updates
 - Fix the units of the frequencies reported by the PMU
 - Fix kernel oops in set_page_dirty by using better locking around it
 - Protect the request pointer with RCU to prevent it being freed while we might still need it
 - Make pool objects read-only
 - Restore physical addresses for fb_mmap() to avoid corrupted page tables
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJd1y5MAAoJEAx081l5xIa+HMUP/3G2QKUMN6hqno7EZQjJPhWs
 UFaVHhwGtWdwyntwUtSxkZEvj1+1InSw2x/vhBdBghFZ4j5yO63v1pe5EkvAZ4Ff
 Yg2z0vhl0tqQ7w4upe5C8TYztBvGqInMdbt76fIAB8jXM10lpPDZ98eVmVTotvCE
 6KHzP7/xW4l53SRnptMM+pHrRlaoc4NpLkTPexpJ7woZv07ktG5mGw1/+kEuHi5q
 qrg9PKOg3z3Q77yq4dxcH3ET+IL9I34A5WktTXCd1xIowdfzkC7rpr5DVHaVjUqf
 Io0Z+ca54enUtfLs5S0pbgmLll9SQ8GMSP1BuwLmZx5Xxqm0TM4paQ0lZV2mVa7X
 izWYni+eIIINMqeIfo8xtegZ2YUAz2unx+hy4HK5VGgnTalqidsgfQ+sFVScPsva
 3DvgA6Hrz4AiYCen1s01DMuJLi/O7BmxwLwrKgpS3XWYVZKpmLKZqdZpkwLBQC7T
 OvluGCLyFsaZj/FAht6l4AqotcWURHd/ujOmK5AxEEelTT1fMAy1KjyiwouWTEIw
 7HCRd2yGkqUt3ShKABwTEh0+m4kub+Id3CaXCcu2Ce+bmSoseq564XPXrRzK7eUX
 KuuTuj9opXvhHLBsVVGkHs72xWNgc7MoVORWxfDmSGRrD5a93tbltlNmuy+PCAKQ
 FY3YQmvWYcLenZAgJ4VX
 =0Z0x
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-11-22' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Two sets of fixes in here, one for amdgpu, and one for i915.

  The amdgpu ones are pretty small. i915's CI system seems to have had a
  few problems in the last week or so; there is one major regression fix
  for fb_mmap, but a bunch of other issues are fixed in there as well:
  oopses, screen flashes and RCU-related issues.

  amdgpu:
   - Remove experimental flag for navi14
   - Fix confusing power message failures on older VI parts
   - Fix a hang with gfxoff when using the register read interface
   - Two stability regression fixes for Raven

  i915:
   - Fix kernel oops in the dumb_create ioctl when there are no CRTCs
   - Fix ugly colored flash on VLV/CHV related to gamma LUT updates
   - Fix the units of the frequencies reported by the PMU
   - Fix kernel oops in set_page_dirty by using better locking around it
   - Protect the request pointer with RCU to prevent it being freed
     while we might still need it
   - Make pool objects read-only
   - Restore physical addresses for fb_mmap() to avoid corrupted page
     tables"

* tag 'drm-fixes-2019-11-22' of git://anongit.freedesktop.org/drm/drm:
  drm/i915/fbdev: Restore physical addresses for fb_mmap()
  Revert "drm/amd/display: enable S/G for RAVEN chip"
  drm/amdgpu: disable gfxoff on original raven
  drm/amdgpu: disable gfxoff when using register read interface
  drm/amd/powerplay: correct fine grained dpm force level setting
  drm/amd/powerplay: issue no PPSMC_MSG_GetCurrPkgPwr on unsupported ASICs
  drm/amdgpu: remove experimental flag for Navi14
  drm/i915: make pool objects read-only
  drm/i915: Protect request peeking with RCU
  drm/i915/userptr: Try to acquire the page lock around set_page_dirty()
  drm/i915/pmu: "Frequency" is reported as accumulated cycles
  drm/i915: Preload LUTs if the hw isn't currently using them
  drm/i915: Don't oops in dumb_create ioctl if we have no crtcs
commit 5d867ab037 (Linus Torvalds, 2019-11-22 09:14:30 -08:00)
16 changed files with 183 additions and 32 deletions


@@ -511,7 +511,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
 	 * Also, don't allow GTT domain if the BO doens't have USWC falg set.
 	 */
 	if (adev->asic_type >= CHIP_CARRIZO &&
-	    adev->asic_type <= CHIP_RAVEN &&
+	    adev->asic_type < CHIP_RAVEN &&
 	    (adev->flags & AMD_IS_APU) &&
 	    (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
 	    amdgpu_bo_support_uswc(bo_flags) &&


@@ -1013,10 +1013,10 @@ static const struct pci_device_id pciidlist[] = {
 	{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
 	/* Navi14 */
-	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
-	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
 	/* Renoir */
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},


@@ -649,15 +649,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			return -ENOMEM;
 		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
-		for (i = 0; i < info->read_mmr_reg.count; i++)
+		amdgpu_gfx_off_ctrl(adev, false);
+		for (i = 0; i < info->read_mmr_reg.count; i++) {
 			if (amdgpu_asic_read_register(adev, se_num, sh_num,
 						      info->read_mmr_reg.dword_offset + i,
 						      &regs[i])) {
 				DRM_DEBUG_KMS("unallowed offset %#x\n",
 					      info->read_mmr_reg.dword_offset + i);
 				kfree(regs);
+				amdgpu_gfx_off_ctrl(adev, true);
 				return -EFAULT;
 			}
+		}
+		amdgpu_gfx_off_ctrl(adev, true);
 		n = copy_to_user(out, regs, min(size, alloc_size));
 		kfree(regs);
 		return n ? -EFAULT : 0;


@@ -1038,8 +1038,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 	case CHIP_VEGA20:
 		break;
 	case CHIP_RAVEN:
-		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
-			&&((adev->gfx.rlc_fw_version != 106 &&
+		/* Disable GFXOFF on original raven. There are combinations
+		 * of sbios and platforms that are not stable.
+		 */
+		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
+			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+		else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+			&&((adev->gfx.rlc_fw_version != 106 &&
 			    adev->gfx.rlc_fw_version < 531) ||
 			    (adev->gfx.rlc_fw_version == 53815) ||
 			    (adev->gfx.rlc_feature_version < 1) ||


@@ -688,7 +688,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	 */
 	if (adev->flags & AMD_IS_APU &&
 	    adev->asic_type >= CHIP_CARRIZO &&
-	    adev->asic_type <= CHIP_RAVEN)
+	    adev->asic_type < CHIP_RAVEN)
 		init_data.flags.gpu_vm_support = true;
 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)


@@ -3478,18 +3478,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
 {
+	struct amdgpu_device *adev = hwmgr->adev;
 	int i;
 	u32 tmp = 0;
 	if (!query)
 		return -EINVAL;
-	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
-	tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-	*query = tmp;
+	/*
+	 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
+	 *  - Hawaii
+	 *  - Bonaire
+	 *  - Fiji
+	 *  - Tonga
+	 */
+	if ((adev->asic_type != CHIP_HAWAII) &&
+	    (adev->asic_type != CHIP_BONAIRE) &&
+	    (adev->asic_type != CHIP_FIJI) &&
+	    (adev->asic_type != CHIP_TONGA)) {
+		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+		tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+		*query = tmp;
 	if (tmp != 0)
 		return 0;
+	}
 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,


@@ -759,6 +759,12 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 	case SMU_UCLK:
 	case SMU_DCEFCLK:
 	case SMU_FCLK:
+		/* There is only 2 levels for fine grained DPM */
+		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+		}
 		ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
 		if (ret)
 			return size;


@@ -201,6 +201,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
 	crtc_state->update_wm_post = false;
 	crtc_state->fb_changed = false;
 	crtc_state->fifo_changed = false;
+	crtc_state->preload_luts = false;
 	crtc_state->wm.need_postvbl_update = false;
 	crtc_state->fb_bits = 0;
 	crtc_state->update_planes = 0;


@@ -990,6 +990,55 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
 	dev_priv->display.color_commit(crtc_state);
 }
+static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+
+	return !old_crtc_state->base.gamma_lut &&
+		!old_crtc_state->base.degamma_lut;
+}
+
+static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+
+	/*
+	 * CGM_PIPE_MODE is itself single buffered. We'd have to
+	 * somehow split it out from chv_load_luts() if we wanted
+	 * the ability to preload the CGM LUTs/CSC without tearing.
+	 */
+	if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
+		return false;
+
+	return !old_crtc_state->base.gamma_lut;
+}
+
+static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+
+	/*
+	 * The hardware degamma is active whenever the pipe
+	 * CSC is active. Thus even if the old state has no
+	 * software degamma we need to avoid clobbering the
+	 * linear hardware degamma mid scanout.
+	 */
+	return !old_crtc_state->csc_enable &&
+		!old_crtc_state->base.gamma_lut;
+}
+
 int intel_color_check(struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
@@ -1133,6 +1182,8 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }
@@ -1185,6 +1236,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
+	crtc_state->preload_luts = chv_can_preload_luts(crtc_state);
+
 	return 0;
 }
@@ -1224,6 +1277,8 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }
@@ -1281,6 +1336,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }
@@ -1319,6 +1376,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
 	if (ret)
 		return ret;
+	crtc_state->preload_luts = glk_can_preload_luts(crtc_state);
+
 	return 0;
 }
@@ -1368,6 +1427,8 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
 	crtc_state->csc_mode = icl_csc_mode(crtc_state);
+	crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
+
 	return 0;
 }


@@ -2504,6 +2504,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
 	 * the highest stride limits of them all.
 	 */
 	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+	if (!crtc)
+		return 0;
+
 	plane = to_intel_plane(crtc->base.primary);
 	return plane->max_stride(plane, pixel_format, modifier,
@@ -13740,6 +13743,11 @@ static void intel_update_crtc(struct intel_crtc *crtc,
 		/* vblanks work again, re-enable pipe CRC. */
 		intel_crtc_enable_pipe_crc(crtc);
 	} else {
+		if (new_crtc_state->preload_luts &&
+		    (new_crtc_state->base.color_mgmt_changed ||
+		     new_crtc_state->update_pipe))
+			intel_color_load_luts(new_crtc_state);
+
 		intel_pre_plane_update(old_crtc_state, new_crtc_state);
 		if (new_crtc_state->update_pipe)
@@ -14034,6 +14042,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		if (new_crtc_state->base.active &&
 		    !needs_modeset(new_crtc_state) &&
+		    !new_crtc_state->preload_luts &&
 		    (new_crtc_state->base.color_mgmt_changed ||
 		     new_crtc_state->update_pipe))
 			intel_color_load_luts(new_crtc_state);


@@ -761,6 +761,7 @@ struct intel_crtc_state {
 	bool update_wm_pre, update_wm_post; /* watermarks are updated */
 	bool fb_changed; /* fb on any of the planes is changed */
 	bool fifo_changed; /* FIFO split is changed */
+	bool preload_luts;
 	/* Pipe source size (ie. panel fitter input size)
 	 * All planes will be positioned inside this space,


@@ -235,6 +235,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	info->apertures->ranges[0].base = ggtt->gmadr.start;
 	info->apertures->ranges[0].size = ggtt->mappable_end;
+	/* Our framebuffer is the entirety of fbdev's system memory */
+	info->fix.smem_start =
+		(unsigned long)(ggtt->gmadr.start + vma->node.start);
+	info->fix.smem_len = vma->node.size;
+
 	vaddr = i915_vma_pin_iomap(vma);
 	if (IS_ERR(vaddr)) {
 		DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -244,10 +249,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	info->screen_base = vaddr;
 	info->screen_size = vma->node.size;
-	/* Our framebuffer is the entirety of fbdev's system memory */
-	info->fix.smem_start = (unsigned long)info->screen_base;
-	info->fix.smem_len = info->screen_size;
-
 	drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
 	/* If the object is shmemfs backed, it will have given us zeroed pages.
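
Why restoring a physical address matters here: fbdev's generic mmap path maps fix.smem_start into userspace with a pfn-based remap, so storing the ioremapped kernel virtual address (screen_base) there corrupts the resulting page tables. The sketch below is illustrative only and not part of this series; fb_mmap_sketch is a hypothetical name, and vm_iomap_memory() stands in for the pfn-remap helper the fbdev core actually uses.

#include <linux/fb.h>
#include <linux/mm.h>

/*
 * Illustrative sketch: roughly how an fbdev mmap implementation consumes
 * fix.smem_start. The value must be a physical (bus) address for the
 * pfn-based mapping to be valid, which is what the hunk above restores.
 */
static int fb_mmap_sketch(struct fb_info *info, struct vm_area_struct *vma)
{
	/* assumes fix.smem_start holds a physical address, not a vaddr */
	return vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
}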


@@ -671,8 +671,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	obj->mm.dirty = false;
 	for_each_sgt_page(page, sgt_iter, pages) {
-		if (obj->mm.dirty)
+		if (obj->mm.dirty && trylock_page(page)) {
+			/*
+			 * As this may not be anonymous memory (e.g. shmem)
+			 * but exist on a real mapping, we have to lock
+			 * the page in order to dirty it -- holding
+			 * the page reference is not sufficient to
+			 * prevent the inode from being truncated.
+			 * Play safe and take the lock.
+			 *
+			 * However...!
+			 *
+			 * The mmu-notifier can be invalidated for a
+			 * migrate_page, that is alreadying holding the lock
+			 * on the page. Such a try_to_unmap() will result
+			 * in us calling put_pages() and so recursively try
+			 * to lock the page. We avoid that deadlock with
+			 * a trylock_page() and in exchange we risk missing
+			 * some page dirtying.
+			 */
 			set_page_dirty(page);
+			unlock_page(page);
+		}
 		mark_page_accessed(page);
 		put_page(page);


@@ -103,6 +103,8 @@ node_create(struct intel_engine_pool *pool, size_t sz)
 		return ERR_CAST(obj);
 	}
+	i915_gem_object_set_readonly(obj);
+
 	node->obj = obj;
 	return node;
 }


@@ -843,8 +843,8 @@ create_event_attributes(struct i915_pmu *pmu)
 		const char *name;
 		const char *unit;
 	} events[] = {
-		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
-		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
+		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
+		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
 		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
 		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
 	};
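
As I read this change, the unit string drops "Hz" because the counter now accumulates mega-cycles rather than exposing an instantaneous frequency, so userspace recovers an average MHz by dividing the counter delta by the elapsed time. A minimal illustrative helper (hypothetical name, not part of the patch):

#include <stdint.h>

/*
 * Illustrative only: average frequency in MHz from two reads of the
 * accumulated mega-cycle counter taken elapsed_s seconds apart.
 * Mega-cycles per second == MHz, which is why the unit is just "M".
 */
static double i915_avg_freq_mhz(uint64_t mcycles_start, uint64_t mcycles_end,
				double elapsed_s)
{
	return (double)(mcycles_end - mcycles_start) / elapsed_s;
}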


@@ -177,9 +177,37 @@ static inline int rq_prio(const struct i915_request *rq)
 	return rq->sched.attr.priority | __NO_PREEMPTION;
 }
-static void kick_submission(struct intel_engine_cs *engine, int prio)
+static inline bool need_preempt(int prio, int active)
 {
-	const struct i915_request *inflight = *engine->execlists.active;
+	/*
+	 * Allow preemption of low -> normal -> high, but we do
+	 * not allow low priority tasks to preempt other low priority
+	 * tasks under the impression that latency for low priority
+	 * tasks does not matter (as much as background throughput),
+	 * so kiss.
+	 */
+	return prio >= max(I915_PRIORITY_NORMAL, active);
+}
+
+static void kick_submission(struct intel_engine_cs *engine,
+			    const struct i915_request *rq,
+			    int prio)
+{
+	const struct i915_request *inflight;
+
+	/*
+	 * We only need to kick the tasklet once for the high priority
+	 * new context we add into the queue.
+	 */
+	if (prio <= engine->execlists.queue_priority_hint)
+		return;
+
+	rcu_read_lock();
+
+	/* Nothing currently active? We're overdue for a submission! */
+	inflight = execlists_active(&engine->execlists);
+	if (!inflight)
+		goto unlock;
 	/*
 	 * If we are already the currently executing context, don't
@@ -188,10 +216,15 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
 	 * tasklet, i.e. we have not change the priority queue
 	 * sufficiently to oust the running context.
 	 */
-	if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
-		return;
+	if (inflight->hw_context == rq->hw_context)
+		goto unlock;
-	tasklet_hi_schedule(&engine->execlists.tasklet);
+	engine->execlists.queue_priority_hint = prio;
+	if (need_preempt(prio, rq_prio(inflight)))
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+	rcu_read_unlock();
 }
 static void __i915_schedule(struct i915_sched_node *node,
@@ -317,13 +350,8 @@ static void __i915_schedule(struct i915_sched_node *node,
 			list_move_tail(&node->link, cache.priolist);
 		}
-		if (prio <= engine->execlists.queue_priority_hint)
-			continue;
-
-		engine->execlists.queue_priority_hint = prio;
 		/* Defer (tasklet) submission until after all of our updates. */
-		kick_submission(engine, prio);
+		kick_submission(engine, node_to_request(node), prio);
 	}
 	spin_unlock(&engine->active.lock);
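
The scheduler change above can be read as the following condensed pattern: the peeked in-flight request may be released by a concurrent retire, so the peek and every dereference of it stay inside one RCU read-side section. This is only a distilled restatement of the diff using the same helpers it uses; kick_if_needed is a hypothetical name and the queue_priority_hint bookkeeping is omitted for brevity.

/*
 * Condensed restatement of the kick_submission() change above;
 * illustrative only, not an additional patch.
 */
static void kick_if_needed(struct intel_engine_cs *engine,
			   const struct i915_request *rq, int prio)
{
	const struct i915_request *inflight;

	rcu_read_lock();	/* keep the peeked request from being freed under us */
	inflight = execlists_active(&engine->execlists);	/* may be NULL */
	if (inflight && inflight->hw_context != rq->hw_context &&
	    need_preempt(prio, rq_prio(inflight)))
		tasklet_hi_schedule(&engine->execlists.tasklet);
	rcu_read_unlock();
}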