Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Just general fixes: radeon, i915, atmel, tegra, amdkfd and one core
  fix"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (28 commits)
  drm: atmel-hlcdc: remove clock polarity from crtc driver
  drm/radeon: only enable DP audio if the monitor supports it
  drm/radeon: fix atom aux payload size check for writes (v2)
  drm/radeon: fix 1 RB harvest config setup for TN/RL
  drm/radeon: enable SRBM timeout interrupt on EG/NI
  drm/radeon: enable SRBM timeout interrupt on SI
  drm/radeon: enable SRBM timeout interrupt on CIK v2
  drm/radeon: dump full IB if we hit a packet error
  drm/radeon: disable mclk switching with 120hz+ monitors
  drm/radeon: use drm_mode_vrefresh() rather than mode->vrefresh
  drm/radeon: enable native backlight control on old macs
  drm/i915: Fix frontbuffer false positve.
  drm/i915: Align initial plane backing objects correctly
  drm/i915: avoid processing spurious/shared interrupts in low-power states
  drm/i915: Check obj->vma_list under the struct_mutex
  drm/i915: Fix a use after free, and unbalanced refcounting
  drm: atmel-hlcdc: remove useless pm_runtime_put_sync in probe
  drm: atmel-hlcdc: reset layer A2Q and UPDATE bits when disabling it
  drm: Fix deadlock due to getconnector locking changes
  drm/i915: Dell Chromebook 11 has PWM backlight
  ...
Committed by Linus Torvalds, 2015-02-28 10:36:48 -08:00
commit ae1aa797e0
31 changed files with 235 additions and 98 deletions


@@ -62,12 +62,18 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
return KFD_MQD_TYPE_CP;
}
static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
unsigned int get_first_pipe(struct device_queue_manager *dqm)
{
BUG_ON(!dqm);
BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.first_compute_pipe;
}
unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.compute_pipe_count;
}
static inline unsigned int get_pipes_num_cpsch(void)
{
return PIPE_PER_ME_CP_SCHEDULING;


@@ -163,6 +163,8 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
int init_pipelines(struct device_queue_manager *dqm,
unsigned int pipes_num, unsigned int first_pipe);
unsigned int get_first_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_num(struct device_queue_manager *dqm);
extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
@@ -175,10 +177,4 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
return (pdd->lds_base >> 60) & 0x0E;
}
extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.compute_pipe_count;
}
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */


@@ -131,5 +131,5 @@ static int register_process_cik(struct device_queue_manager *dqm,
static int initialize_cpsch_cik(struct device_queue_manager *dqm)
{
return init_pipelines(dqm, get_pipes_num(dqm), 0);
return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
}


@@ -153,7 +153,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
(adj->crtc_hdisplay - 1) |
((adj->crtc_vdisplay - 1) << 16));
cfg = ATMEL_HLCDC_CLKPOL;
cfg = 0;
prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
mode_rate = mode->crtc_clock * 1000;


@@ -311,8 +311,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
pm_runtime_enable(dev->dev);
pm_runtime_put_sync(dev->dev);
ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize mode setting\n");


@@ -311,7 +311,8 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
/* Disable the layer */
regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
ATMEL_HLCDC_LAYER_RST);
ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
ATMEL_HLCDC_LAYER_UPDATE);
/* Clear all pending interrupts */
regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);


@@ -2127,7 +2127,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
connector = drm_connector_find(dev, out_resp->connector_id);
if (!connector) {
@@ -2157,6 +2156,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->mm_height = connector->display_info.height_mm;
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;


@@ -2114,6 +2114,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
* number comparisons on buffer last_read|write_seqno. It also allows an
* emission time to be associated with the request for tracking how far ahead
* of the GPU the submission is.
*
* The requests are reference counted, so upon creation they should have an
* initial reference taken using kref_init
*/
struct drm_i915_gem_request {
struct kref ref;
@@ -2137,7 +2140,16 @@ struct drm_i915_gem_request {
/** Position in the ringbuffer of the end of the whole request */
u32 tail;
/** Context related to this request */
/**
* Context related to this request
* Contexts are refcounted, so when this request is associated with a
* context, we must increment the context's refcount, to guarantee that
* it persists while any request is linked to it. Requests themselves
* are also refcounted, so the request will only be freed when the last
* reference to it is dismissed, and the code in
* i915_gem_request_free() will then decrement the refcount on the
* context.
*/
struct intel_context *ctx;
/** Batch buffer related to this request if any */
@@ -2374,6 +2386,7 @@ struct drm_i915_cmd_table {
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xb || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)


@@ -2659,8 +2659,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
if (submit_req->ctx != ring->default_context)
intel_lr_context_unpin(ring, submit_req->ctx);
i915_gem_context_unreference(submit_req->ctx);
kfree(submit_req);
i915_gem_request_unreference(submit_req);
}
/*


@@ -485,10 +485,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
stolen_offset, gtt_offset, size);
/* KISS and expect everything to be page-aligned */
BUG_ON(stolen_offset & 4095);
BUG_ON(size & 4095);
if (WARN_ON(size == 0))
if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
WARN_ON(stolen_offset & 4095))
return NULL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);


@@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
ret = -EBUSY;
goto err;
}
if (args->tiling_mode == I915_TILING_NONE) {
@@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
}
mutex_lock(&dev->struct_mutex);
if (args->tiling_mode != obj->tiling_mode ||
args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
@@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj->bit_17 = NULL;
}
err:
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);


@@ -1892,6 +1892,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
while (true) {
/* Find, clear, then process each source of interrupt */
@@ -1936,6 +1939,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
u32 master_ctl, iir;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
for (;;) {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
@@ -2208,6 +2214,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
intel_uncore_check_errors(dev);
@@ -2279,6 +2288,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
enum pipe pipe;
u32 aux_mask = GEN8_AUX_CHANNEL_A;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
if (IS_GEN9(dev))
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
@@ -3771,6 +3783,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
iir = I915_READ16(IIR);
if (iir == 0)
return IRQ_NONE;
@@ -3951,6 +3966,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
int pipe, ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
iir = I915_READ(IIR);
do {
bool irq_received = (iir & ~flip_mask) != 0;
@@ -4171,6 +4189,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
iir = I915_READ(IIR);
for (;;) {
@@ -4520,6 +4541,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
dev_priv->pm.irqs_enabled = false;
synchronize_irq(dev_priv->dev->irq);
}
/**


@@ -2371,13 +2371,19 @@ intel_alloc_plane_obj(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
u32 base = plane_config->base;
u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
u32 size_aligned = round_up(plane_config->base + plane_config->size,
PAGE_SIZE);
size_aligned -= base_aligned;
if (plane_config->size == 0)
return false;
obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
plane_config->size);
obj = i915_gem_object_create_stolen_for_preallocated(dev,
base_aligned,
base_aligned,
size_aligned);
if (!obj)
return false;
@@ -2725,10 +2731,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
case DRM_FORMAT_XRGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
case DRM_FORMAT_ARGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
break;
case DRM_FORMAT_XBGR8888:
plane_ctl |= PLANE_CTL_ORDER_RGBX;
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
case DRM_FORMAT_ABGR8888:
plane_ctl |= PLANE_CTL_ORDER_RGBX;
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
break;
case DRM_FORMAT_XRGB2101010:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
break;
@@ -6627,7 +6642,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
plane_config->tiling);
plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), plane, fb->width, fb->height,
@@ -7664,7 +7679,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
plane_config->tiling);
plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);
plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), fb->width, fb->height,
@@ -7755,7 +7770,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
aligned_height = intel_fb_align_height(dev, fb->height,
plane_config->tiling);
plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);
plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), fb->width, fb->height,
@@ -12182,9 +12197,6 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -ENOMEM;
}
if (fb == crtc->cursor->fb)
return 0;
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
@@ -13096,6 +13108,9 @@ static struct intel_quirk intel_quirks[] = {
/* HP Chromebook 14 (Celeron 2955U) */
{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
/* Dell Chromebook 11 */
{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
};
static void intel_init_quirks(struct drm_device *dev)


@@ -503,18 +503,19 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
* If there isn't a request associated with this submission,
* create one as a temporary holder.
*/
WARN(1, "execlist context submission without request");
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
request->ring = ring;
request->ctx = to;
kref_init(&request->ref);
request->uniq = dev_priv->request_uniq++;
i915_gem_context_reference(request->ctx);
} else {
i915_gem_request_reference(request);
WARN_ON(to != request->ctx);
}
request->tail = tail;
i915_gem_request_reference(request);
i915_gem_context_reference(request->ctx);
intel_runtime_pm_get(dev_priv);
@@ -731,7 +732,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
if (ctx_obj && (ctx != ring->default_context))
intel_lr_context_unpin(ring, ctx);
intel_runtime_pm_put(dev_priv);
i915_gem_context_unreference(ctx);
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}


@@ -178,6 +178,13 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
/* The atom implementation only supports writes with a max payload of
* 12 bytes since it uses 4 bits for the total count (header + payload)
* in the parameter space. The atom interface supports 16 byte
* payloads for reads. The hw itself supports up to 16 bytes of payload.
*/
if (WARN_ON_ONCE(msg->size > 12))
return -E2BIG;
/* tx_size needs to be 4 even for bare address packets since the atom
* table needs the info in tx_buf[3].
*/


@@ -731,7 +731,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
if (radeon_audio != 0 &&
drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
return ATOM_ENCODER_MODE_DP_AUDIO;
return ATOM_ENCODER_MODE_DP;
} else if (radeon_audio != 0) {
@@ -747,7 +749,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
}
break;
case DRM_MODE_CONNECTOR_eDP:
if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
if (radeon_audio != 0 &&
drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
return ATOM_ENCODER_MODE_DP_AUDIO;
return ATOM_ENCODER_MODE_DP;
case DRM_MODE_CONNECTOR_DVIA:
@@ -1720,8 +1724,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
}
encoder_mode = atombios_get_encoder_mode(encoder);
if (radeon_audio != 0 &&
(encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
if (connector && (radeon_audio != 0) &&
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
(ENCODER_MODE_IS_DP(encoder_mode) &&
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
radeon_audio_dpms(encoder, mode);
}
@@ -2136,6 +2142,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int encoder_mode;
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -2164,8 +2171,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* handled in dpms */
encoder_mode = atombios_get_encoder_mode(encoder);
if (radeon_audio != 0 &&
(encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode)))
if (connector && (radeon_audio != 0) &&
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
(ENCODER_MODE_IS_DP(encoder_mode) &&
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
radeon_audio_mode_set(encoder, adjusted_mode);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:


@@ -3613,6 +3613,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
WREG32(SRBM_INT_CNTL, 0x1);
WREG32(SRBM_INT_ACK, 0x1);
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
@@ -7230,6 +7232,8 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
/* grbm */
WREG32(GRBM_INT_CNTL, 0);
/* SRBM */
WREG32(SRBM_INT_CNTL, 0);
/* vline/vblank, etc. */
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -8046,6 +8050,10 @@ int cik_irq_process(struct radeon_device *rdev)
break;
}
break;
case 96:
DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
WREG32(SRBM_INT_ACK, 0x1);
break;
case 124: /* UVD */
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);


@@ -482,6 +482,10 @@
#define SOFT_RESET_ORB (1 << 23)
#define SOFT_RESET_VCE (1 << 24)
#define SRBM_READ_ERROR 0xE98
#define SRBM_INT_CNTL 0xEA0
#define SRBM_INT_ACK 0xEA8
#define VM_L2_CNTL 0x1400
#define ENABLE_L2_CACHE (1 << 0)
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)


@@ -3253,6 +3253,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
WREG32(SRBM_INT_CNTL, 0x1);
WREG32(SRBM_INT_ACK, 0x1);
evergreen_fix_pci_max_read_req_size(rdev);
@@ -4324,6 +4326,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(SRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
@@ -5066,6 +5069,10 @@ int evergreen_irq_process(struct radeon_device *rdev)
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
case 96:
DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
WREG32(SRBM_INT_ACK, 0x1);
break;
case 124: /* UVD */
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);


@@ -1191,6 +1191,10 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
#define SRBM_READ_ERROR 0xE98
#define SRBM_INT_CNTL 0xEA0
#define SRBM_INT_ACK 0xEA8
/* display watermarks */
#define DC_LB_MEMORY_SPLIT 0x6b0c
#define PRIORITY_A_CNT 0x6b18


@@ -962,6 +962,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
WREG32(SRBM_INT_CNTL, 0x1);
WREG32(SRBM_INT_ACK, 0x1);
evergreen_fix_pci_max_read_req_size(rdev);
@@ -1086,12 +1088,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)
if ((rdev->config.cayman.max_backends_per_se == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
if ((disabled_rb_mask & 3) == 1) {
/* RB0 disabled, RB1 enabled */
tmp = 0x11111111;
} else {
if ((disabled_rb_mask & 3) == 2) {
/* RB1 disabled, RB0 enabled */
tmp = 0x00000000;
} else {
/* RB0 disabled, RB1 enabled */
tmp = 0x11111111;
}
} else {
tmp = gb_addr_config & NUM_PIPES_MASK;


@@ -82,6 +82,10 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
#define SRBM_READ_ERROR 0xE98
#define SRBM_INT_CNTL 0xEA0
#define SRBM_INT_ACK 0xEA8
#define SRBM_STATUS2 0x0EC4
#define DMA_BUSY (1 << 5)
#define DMA1_BUSY (1 << 6)


@@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
vrefresh = radeon_crtc->hw_mode.vrefresh;
vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
break;
}
}


@@ -715,6 +715,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
struct radeon_device *rdev = p->rdev;
uint32_t header;
int ret = 0, i;
if (idx >= ib_chunk->length_dw) {
DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
@@ -743,14 +744,25 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
break;
default:
DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
return -EINVAL;
ret = -EINVAL;
goto dump_ib;
}
if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
return -EINVAL;
ret = -EINVAL;
goto dump_ib;
}
return 0;
dump_ib:
for (i = 0; i < ib_chunk->length_dw; i++) {
if (i == idx)
printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
else
printk("\t0x%08x\n", radeon_get_ib_value(p, i));
}
return ret;
}
/**


@@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
(rdev->pdev->subsystem_vendor == 0x1734) &&
(rdev->pdev->subsystem_device == 0x1107))
use_bl = false;
/* Older PPC macs use on-GPU backlight controller */
#ifndef CONFIG_PPC_PMAC
/* disable native backlight control on older asics */
else if (rdev->family < CHIP_R600)
use_bl = false;
#endif
else
use_bl = true;
}


@@ -852,6 +852,12 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
single_display = false;
}
/* 120hz tends to be problematic even if they are under the
* vblank limit.
*/
if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
single_display = false;
/* certain older asics have a separare 3D performance state,
* so try that first if the user selected performance
*/


@@ -3162,6 +3162,8 @@ static void si_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
WREG32(SRBM_INT_CNTL, 1);
WREG32(SRBM_INT_ACK, 1);
evergreen_fix_pci_max_read_req_size(rdev);
@@ -4699,12 +4701,6 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
switch (pkt.type) {
case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
for (i = 0; i < ib->length_dw; i++) {
if (i == idx)
printk("\t0x%08x <---\n", ib->ptr[i]);
else
printk("\t0x%08x\n", ib->ptr[i]);
}
ret = -EINVAL;
break;
case RADEON_PACKET_TYPE2:
@@ -4736,8 +4732,15 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
ret = -EINVAL;
break;
}
if (ret)
if (ret) {
for (i = 0; i < ib->length_dw; i++) {
if (i == idx)
printk("\t0x%08x <---\n", ib->ptr[i]);
else
printk("\t0x%08x\n", ib->ptr[i]);
}
break;
}
} while (idx < ib->length_dw);
return ret;
@@ -5910,6 +5913,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
WREG32(GRBM_INT_CNTL, 0);
WREG32(SRBM_INT_CNTL, 0);
if (rdev->num_crtc >= 2) {
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -6609,6 +6613,10 @@ int si_irq_process(struct radeon_device *rdev)
break;
}
break;
case 96:
DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
WREG32(SRBM_INT_ACK, 0x1);
break;
case 124: /* UVD */
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);


@@ -358,6 +358,10 @@
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
#define SRBM_READ_ERROR 0xE98
#define SRBM_INT_CNTL 0xEA0
#define SRBM_INT_ACK 0xEA8
#define SRBM_STATUS2 0x0EC4
#define DMA_BUSY (1 << 5)
#define DMA1_BUSY (1 << 6)


@@ -997,8 +997,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
crtc->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state)
if (state) {
crtc->state = &state->base;
crtc->state->crtc = crtc;
}
}
static struct drm_crtc_state *
@@ -1012,6 +1014,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
return NULL;
copy->base.mode_changed = false;
copy->base.active_changed = false;
copy->base.planes_changed = false;
copy->base.event = NULL;
@@ -1227,9 +1230,6 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
/* program display mode */
tegra_dc_set_timings(dc, mode);
if (dc->soc->supports_border_color)
tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
/* interlacing isn't supported yet, so disable it */
if (dc->soc->supports_interlacing) {
value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL);
@@ -1252,42 +1252,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void tegra_crtc_prepare(struct drm_crtc *crtc)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
unsigned int syncpt;
unsigned long value;
drm_crtc_vblank_off(crtc);
if (dc->pipe)
syncpt = SYNCPT_VBLANK1;
else
syncpt = SYNCPT_VBLANK0;
/* initialize display controller */
tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
/* initialize timer */
value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
}
static void tegra_crtc_commit(struct drm_crtc *crtc)
@@ -1664,6 +1629,8 @@ static int tegra_dc_init(struct host1x_client *client)
struct tegra_drm *tegra = drm->dev_private;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
unsigned int syncpt;
u32 value;
int err;
if (tegra->domain) {
@@ -1730,6 +1697,40 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
/* initialize display controller */
if (dc->pipe)
syncpt = SYNCPT_VBLANK1;
else
syncpt = SYNCPT_VBLANK0;
tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
/* initialize timer */
value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
if (dc->soc->supports_border_color)
tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
return 0;
cleanup:


@@ -851,6 +851,14 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
err = clk_set_rate(hdmi->clk, pclk);
if (err < 0) {
dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
err);
}
DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk));
/* power up sequence */
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0);
value &= ~SOR_PLL_PDBG;


@@ -214,9 +214,9 @@
INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
#define _INTEL_BDW_M_IDS(gt, info) \
_INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
_INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
_INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
_INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
_INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
_INTEL_BDW_M(gt, 0x160E, info) /* ULX */
#define _INTEL_BDW_D_IDS(gt, info) \