Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-30 02:46:55 +07:00)
Merge tag 'drm-intel-next-2014-07-25-merged' of git://anongit.freedesktop.org/drm-intel into drm-next
Final feature pull for 3.17.

drm-intel-next-2014-07-25:
- Ditch UMS support (well, just the config option for now)
- Prep work for future platforms (Sonika Jindal, Damien)
- Runtime PM / S0ix fixes (Paulo, Jesse)
- PSR tracking improvements, locking fixes, now enabled by default!
- RPS fixes for CHV (Deepak, Ville)
- drm core patches for rotation support (Ville, Sagar Kamble) - the i915 parts unfortunately didn't make it yet
- userptr fixes (Chris)
- Minimum backlight brightness (Jani), acked long ago by Matthew Garrett on irc - I've forgotten about this patch :(

QA is a bit unhappy about the DP MST stuff since it broke hpd testing a bit, but otherwise it looks sane. I've backmerged drm-next to resolve conflicts with the MST stuff, which means the new tag itself doesn't contain the overview as usual.

* tag 'drm-intel-next-2014-07-25-merged' of git://anongit.freedesktop.org/drm-intel: (75 commits)
  drm/i915/userptr: Keep spin_lock/unlock in the same block
  drm/i915: Allow overlapping userptr objects
  drm/i915: Ditch UMS config option
  drm/i915: respect the VBT minimum backlight brightness
  drm/i915: extract backlight minimum brightness from VBT
  drm/i915: Replace HAS_PCH_SPLIT which incorrectly lets some platforms in
  drm/i915: Returning from increase/decrease of pllclock when invalid
  drm/i915: Setting legacy palette correctly for different platforms
  drm/i915: Avoid incorrect returning for some platforms
  drm/i915: Writing proper check for reading of pipe status reg
  drm/i915: Returning the right VGA control reg for platforms
  drm/i915: Allowing changing of wm latencies for valid platforms
  drm/i915: Adding HAS_GMCH_DISPLAY macro
  drm/i915: Fix possible overflow when recording semaphore states.
  drm/i915: Do not unmap object unless no other VMAs reference it
  drm/i915: remove plane/cursor/pipe assertions from intel_crtc_disable
  drm/i915: Reorder ctx unref on ppgtt cleanup
  drm/i915/error: Check the potential ctx obj's vm
  drm/i915: Fix printing proper min/min/rpe values in debugfs
  drm/i915: BDW can also detect unclaimed registers
  ...
commit c759606c96
@@ -2508,7 +2508,7 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >Description/Restrictions</td>
 	</tr>
 	<tr>
-	<td rowspan="20" valign="top" >DRM</td>
+	<td rowspan="21" valign="top" >DRM</td>
 	<td rowspan="2" valign="top" >Generic</td>
 	<td valign="top" >“EDID”</td>
 	<td valign="top" >BLOB | IMMUTABLE</td>
@@ -2639,7 +2639,7 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="2" valign="top" >Optional</td>
+	<td rowspan="3" valign="top" >Optional</td>
 	<td valign="top" >“scaling mode”</td>
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
@@ -2647,6 +2647,15 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
+	<td valign="top" >"aspect ratio"</td>
+	<td valign="top" >ENUM</td>
+	<td valign="top" >{ "None", "4:3", "16:9" }</td>
+	<td valign="top" >Connector</td>
+	<td valign="top" >DRM property to set aspect ratio from user space app.
+	This enum is made generic to allow addition of custom aspect
+	ratios.</td>
+	</tr>
+	<tr>
 	<td valign="top" >“dirty”</td>
 	<td valign="top" >ENUM | IMMUTABLE</td>
 	<td valign="top" >{ "Off", "On", "Annotate" }</td>
@@ -2655,7 +2664,7 @@ void intel_crt_init(struct drm_device *dev)
 	</tr>
 	<tr>
 	<td rowspan="21" valign="top" >i915</td>
-	<td rowspan="3" valign="top" >Generic</td>
+	<td rowspan="2" valign="top" >Generic</td>
 	<td valign="top" >"Broadcast RGB"</td>
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
@@ -2670,10 +2679,11 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td valign="top" >Standard name as in DRM</td>
-	<td valign="top" >Standard type as in DRM</td>
-	<td valign="top" >Standard value as in DRM</td>
-	<td valign="top" >Standard Object as in DRM</td>
+	<td rowspan="1" valign="top" >Plane</td>
+	<td valign="top" >“rotation”</td>
+	<td valign="top" >BITMASK</td>
+	<td valign="top" >{ 0, "rotate-0" }, { 2, "rotate-180" }</td>
+	<td valign="top" >Plane</td>
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
@@ -2805,8 +2815,8 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="3" valign="top" >CDV gma-500</td>
-	<td rowspan="3" valign="top" >Generic</td>
+	<td rowspan="2" valign="top" >CDV gma-500</td>
+	<td rowspan="2" valign="top" >Generic</td>
 	<td valign="top" >"Broadcast RGB"</td>
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ “Full”, “Limited 16:235” }</td>
@@ -2821,15 +2831,8 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td valign="top" >Standard name as in DRM</td>
-	<td valign="top" >Standard type as in DRM</td>
-	<td valign="top" >Standard value as in DRM</td>
-	<td valign="top" >Standard Object as in DRM</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
-	<td rowspan="20" valign="top" >Poulsbo</td>
-	<td rowspan="2" valign="top" >Generic</td>
+	<td rowspan="19" valign="top" >Poulsbo</td>
+	<td rowspan="1" valign="top" >Generic</td>
 	<td valign="top" >“backlight”</td>
 	<td valign="top" >RANGE</td>
 	<td valign="top" >Min=0, Max=100</td>
@@ -2837,13 +2840,6 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td valign="top" >Standard name as in DRM</td>
-	<td valign="top" >Standard type as in DRM</td>
-	<td valign="top" >Standard value as in DRM</td>
-	<td valign="top" >Standard Object as in DRM</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
 	<td rowspan="17" valign="top" >SDVO-TV</td>
 	<td valign="top" >“mode”</td>
 	<td valign="top" >ENUM</td>
@@ -3070,7 +3066,7 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="3" valign="top" >i2c/ch7006_drv</td>
+	<td rowspan="2" valign="top" >i2c/ch7006_drv</td>
 	<td valign="top" >Generic</td>
 	<td valign="top" >“scale”</td>
 	<td valign="top" >RANGE</td>
@@ -3079,14 +3075,7 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="2" valign="top" >TV</td>
-	<td valign="top" >Standard names as in DRM</td>
-	<td valign="top" >Standard types as in DRM</td>
-	<td valign="top" >Standard Values as in DRM</td>
-	<td valign="top" >Standard object as in DRM</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
+	<td rowspan="1" valign="top" >TV</td>
 	<td valign="top" >“mode”</td>
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "PAL", "PAL-M","PAL-N"}, ”PAL-Nc"
@@ -3095,7 +3084,7 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="16" valign="top" >nouveau</td>
+	<td rowspan="15" valign="top" >nouveau</td>
 	<td rowspan="6" valign="top" >NV10 Overlay</td>
 	<td valign="top" >"colorkey"</td>
 	<td valign="top" >RANGE</td>
@@ -3204,14 +3193,6 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td valign="top" >Generic</td>
-	<td valign="top" >Standard name as in DRM</td>
-	<td valign="top" >Standard type as in DRM</td>
-	<td valign="top" >Standard value as in DRM</td>
-	<td valign="top" >Standard Object as in DRM</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
 	<td rowspan="2" valign="top" >omap</td>
 	<td rowspan="2" valign="top" >Generic</td>
 	<td valign="top" >“rotation”</td>
@@ -3242,7 +3223,7 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td rowspan="10" valign="top" >radeon</td>
+	<td rowspan="9" valign="top" >radeon</td>
 	<td valign="top" >DVI-I</td>
 	<td valign="top" >“coherent”</td>
 	<td valign="top" >RANGE</td>
@@ -3314,14 +3295,6 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >TBD</td>
 	</tr>
 	<tr>
-	<td valign="top" >Generic</td>
-	<td valign="top" >Standard name as in DRM</td>
-	<td valign="top" >Standard type as in DRM</td>
-	<td valign="top" >Standard value as in DRM</td>
-	<td valign="top" >Standard Object as in DRM</td>
-	<td valign="top" >TBD</td>
-	</tr>
-	<tr>
 	<td rowspan="3" valign="top" >rcar-du</td>
 	<td rowspan="3" valign="top" >Generic</td>
 	<td valign="top" >"alpha"</td>
@@ -182,6 +182,12 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
 	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
 };
 
+static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
+	{ DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
+	{ DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
+	{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
+};
+
 /*
  * Non-global properties, but "required" for certain connectors.
  */
@@ -1462,6 +1468,33 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
 
+/**
+ * drm_mode_create_aspect_ratio_property - create aspect ratio property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
+{
+	if (dev->mode_config.aspect_ratio_property)
+		return 0;
+
+	dev->mode_config.aspect_ratio_property =
+		drm_property_create_enum(dev, 0, "aspect ratio",
+				drm_aspect_ratio_enum_list,
+				ARRAY_SIZE(drm_aspect_ratio_enum_list));
+
+	if (dev->mode_config.aspect_ratio_property == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
+
 /**
  * drm_mode_create_dirty_property - create dirty property
  * @dev: DRM device
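Side note, not part of the diff above: a minimal sketch of how a connector driver might hook up the new aspect-ratio property. The example_attach_aspect_ratio() name and the default value are hypothetical; drm_mode_create_aspect_ratio_property() is the helper added above and drm_object_attach_property() is the existing DRM helper.

#include <drm/drm_crtc.h>

/* Illustrative sketch only, not from this commit. */
static void example_attach_aspect_ratio(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	/* Create the per-device "aspect ratio" enum property once. */
	if (drm_mode_create_aspect_ratio_property(dev))
		return;

	/* Expose it on this connector, defaulting to "Automatic". */
	drm_object_attach_property(&connector->base,
				   dev->mode_config.aspect_ratio_property,
				   DRM_MODE_PICTURE_ASPECT_NONE);
}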
@@ -3476,19 +3509,28 @@ EXPORT_SYMBOL(drm_property_create_enum);
 struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
 					 int flags, const char *name,
 					 const struct drm_prop_enum_list *props,
-					 int num_values)
+					 int num_props,
+					 uint64_t supported_bits)
 {
 	struct drm_property *property;
-	int i, ret;
+	int i, ret, index = 0;
+	int num_values = hweight64(supported_bits);
 
 	flags |= DRM_MODE_PROP_BITMASK;
 
 	property = drm_property_create(dev, flags, name, num_values);
 	if (!property)
 		return NULL;
+	for (i = 0; i < num_props; i++) {
+		if (!(supported_bits & (1ULL << props[i].type)))
+			continue;
 
-	for (i = 0; i < num_values; i++) {
-		ret = drm_property_add_enum(property, i,
+		if (WARN_ON(index >= num_values)) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+
+		ret = drm_property_add_enum(property, index++,
 				      props[i].type,
 				      props[i].name);
 		if (ret) {
@@ -4936,6 +4978,36 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
 }
 EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
 
+/**
+ * drm_rotation_simplify() - Try to simplify the rotation
+ * @rotation: Rotation to be simplified
+ * @supported_rotations: Supported rotations
+ *
+ * Attempt to simplify the rotation to a form that is supported.
+ * Eg. if the hardware supports everything except DRM_REFLECT_X
+ * one could call this function like this:
+ *
+ * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
+ *                       BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
+ *                       BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
+ *
+ * to eliminate the DRM_ROTATE_X flag. Depending on what kind of
+ * transforms the hardware supports, this function may not
+ * be able to produce a supported transform, so the caller should
+ * check the result afterwards.
+ */
+unsigned int drm_rotation_simplify(unsigned int rotation,
+				   unsigned int supported_rotations)
+{
+	if (rotation & ~supported_rotations) {
+		rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
+		rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
+	}
+
+	return rotation;
+}
+EXPORT_SYMBOL(drm_rotation_simplify);
+
 /**
  * drm_mode_config_init - initialize DRM mode_configuration structure
  * @dev: DRM device
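Not part of the diff: a short sketch of the caller-side check the kerneldoc above asks for. The example_check_rotation() name and the supported mask are made up for illustration; drm_rotation_simplify() is the helper added above.

#include <drm/drm_crtc.h>

/* Illustrative sketch only, not from this commit. */
static int example_check_rotation(unsigned int *rotation)
{
	/* Everything except DRM_REFLECT_X, as in the kerneldoc example. */
	const unsigned int supported = BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) |
				       BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) |
				       BIT(DRM_REFLECT_Y);

	*rotation = drm_rotation_simplify(*rotation, supported);

	/* The helper may still fail to find a supported form. */
	return (*rotation & ~supported) ? -EINVAL : 0;
}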
@@ -5054,3 +5126,21 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 	drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+						       unsigned int supported_rotations)
+{
+	static const struct drm_prop_enum_list props[] = {
+		{ DRM_ROTATE_0,   "rotate-0" },
+		{ DRM_ROTATE_90,  "rotate-90" },
+		{ DRM_ROTATE_180, "rotate-180" },
+		{ DRM_ROTATE_270, "rotate-270" },
+		{ DRM_REFLECT_X,  "reflect-x" },
+		{ DRM_REFLECT_Y,  "reflect-y" },
+	};
+
+	return drm_property_create_bitmask(dev, 0, "rotation",
+					   props, ARRAY_SIZE(props),
+					   supported_rotations);
+}
+EXPORT_SYMBOL(drm_mode_create_rotation_property);
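Not part of the diff: a minimal sketch of how a driver might expose a rotation property limited to what its hardware supports. The example_attach_rotation() name and the chosen mask are hypothetical; drm_mode_create_rotation_property() is the helper added above and drm_object_attach_property() is the existing DRM helper.

#include <drm/drm_crtc.h>

/* Illustrative sketch only, not from this commit. */
static void example_attach_rotation(struct drm_device *dev,
				    struct drm_plane *plane)
{
	/* Advertise only the rotations the hardware can actually do. */
	struct drm_property *prop =
		drm_mode_create_rotation_property(dev,
						  BIT(DRM_ROTATE_0) |
						  BIT(DRM_ROTATE_180));

	if (prop)
		drm_object_attach_property(&plane->base, prop,
					   BIT(DRM_ROTATE_0));
}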
@@ -3776,8 +3776,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 
 	frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
 
-	/* Populate picture aspect ratio from CEA mode list */
-	if (frame->video_code > 0)
+	/*
+	 * Populate picture aspect ratio from either
+	 * user input (if specified) or from the CEA mode list.
+	 */
+	if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
+	    mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
+		frame->picture_aspect = mode->picture_aspect_ratio;
+	else if (frame->video_code > 0)
 		frame->picture_aspect = drm_get_cea_aspect_ratio(
 						frame->video_code);
 
@@ -293,3 +293,143 @@ void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
 		DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
 }
 EXPORT_SYMBOL(drm_rect_debug_print);
+
+/**
+ * drm_rect_rotate - Rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation to be applied
+ *
+ * Apply @rotation to the coordinates of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width correcsponds to the horizontal and @height
+ * to the vertical axis of the untransformed coordinate
+ * space.
+ */
+void drm_rect_rotate(struct drm_rect *r,
+		     int width, int height,
+		     unsigned int rotation)
+{
+	struct drm_rect tmp;
+
+	if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+		tmp = *r;
+
+		if (rotation & BIT(DRM_REFLECT_X)) {
+			r->x1 = width - tmp.x2;
+			r->x2 = width - tmp.x1;
+		}
+
+		if (rotation & BIT(DRM_REFLECT_Y)) {
+			r->y1 = height - tmp.y2;
+			r->y2 = height - tmp.y1;
+		}
+	}
+
+	switch (rotation & 0xf) {
+	case BIT(DRM_ROTATE_0):
+		break;
+	case BIT(DRM_ROTATE_90):
+		tmp = *r;
+		r->x1 = tmp.y1;
+		r->x2 = tmp.y2;
+		r->y1 = width - tmp.x2;
+		r->y2 = width - tmp.x1;
+		break;
+	case BIT(DRM_ROTATE_180):
+		tmp = *r;
+		r->x1 = width - tmp.x2;
+		r->x2 = width - tmp.x1;
+		r->y1 = height - tmp.y2;
+		r->y2 = height - tmp.y1;
+		break;
+	case BIT(DRM_ROTATE_270):
+		tmp = *r;
+		r->x1 = height - tmp.y2;
+		r->x2 = height - tmp.y1;
+		r->y1 = tmp.x1;
+		r->y2 = tmp.x2;
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL(drm_rect_rotate);
+
+/**
+ * drm_rect_rotate_inv - Inverse rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation whose inverse is to be applied
+ *
+ * Apply the inverse of @rotation to the coordinates
+ * of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width correcsponds to the horizontal and @height
+ * to the vertical axis of the original untransformed
+ * coordinate space, so that you never have to flip
+ * them when doing a rotatation and its inverse.
+ * That is, if you do:
+ *
+ * drm_rotate(&r, width, height, rotation);
+ * drm_rotate_inv(&r, width, height, rotation);
+ *
+ * you will always get back the original rectangle.
+ */
+void drm_rect_rotate_inv(struct drm_rect *r,
+			 int width, int height,
+			 unsigned int rotation)
+{
+	struct drm_rect tmp;
+
+	switch (rotation & 0xf) {
+	case BIT(DRM_ROTATE_0):
+		break;
+	case BIT(DRM_ROTATE_90):
+		tmp = *r;
+		r->x1 = width - tmp.y2;
+		r->x2 = width - tmp.y1;
+		r->y1 = tmp.x1;
+		r->y2 = tmp.x2;
+		break;
+	case BIT(DRM_ROTATE_180):
+		tmp = *r;
+		r->x1 = width - tmp.x2;
+		r->x2 = width - tmp.x1;
+		r->y1 = height - tmp.y2;
+		r->y2 = height - tmp.y1;
+		break;
+	case BIT(DRM_ROTATE_270):
+		tmp = *r;
+		r->x1 = tmp.y1;
+		r->x2 = tmp.y2;
+		r->y1 = height - tmp.x2;
+		r->y2 = height - tmp.x1;
+		break;
+	default:
+		break;
+	}
+
+	if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+		tmp = *r;
+
+		if (rotation & BIT(DRM_REFLECT_X)) {
+			r->x1 = width - tmp.x2;
+			r->x2 = width - tmp.x1;
+		}
+
+		if (rotation & BIT(DRM_REFLECT_Y)) {
+			r->y1 = height - tmp.y2;
+			r->y2 = height - tmp.y1;
+		}
+	}
+}
+EXPORT_SYMBOL(drm_rect_rotate_inv);
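Not part of the diff: a tiny worked example of the round-trip behaviour the kerneldoc above promises. The rectangle and source size are made up for illustration.

#include <drm/drm_crtc.h>
#include <drm/drm_rect.h>

/* Illustrative sketch only, not from this commit. */
static void example_rect_roundtrip(void)
{
	/* A 100x50 rectangle at (10,20) inside a 640x480 source. */
	struct drm_rect r = { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 70 };

	/* Map it into the 90-degree-rotated coordinate space... */
	drm_rect_rotate(&r, 640, 480, BIT(DRM_ROTATE_90));

	/* ...and back: r ends up as the original rectangle again. */
	drm_rect_rotate_inv(&r, 640, 480, BIT(DRM_ROTATE_90));
}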
@@ -69,15 +69,3 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
 	  option changes the default for that module option.
 
 	  If in doubt, say "N".
-
-config DRM_I915_UMS
-	bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
-	depends on DRM_I915 && BROKEN
-	default n
-	help
-	  Choose this option if you still need userspace modesetting.
-
-	  Userspace modesetting is deprecated for quite some time now, so
-	  enable this only if you have ancient versions of the DDX drivers.
-
-	  If in doubt, say "N".
@@ -1108,20 +1108,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
 			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
 	} else if (IS_VALLEYVIEW(dev)) {
-		u32 freq_sts, val;
+		u32 freq_sts;
 
 		mutex_lock(&dev_priv->rps.hw_lock);
 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
-		val = valleyview_rps_max_freq(dev_priv);
 		seq_printf(m, "max GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv, val));
+			   vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
 
-		val = valleyview_rps_min_freq(dev_priv);
 		seq_printf(m, "min GPU freq: %d MHz\n",
-			   vlv_gpu_freq(dev_priv, val));
+			   vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
 
+		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+			   vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+
 		seq_printf(m, "current GPU freq: %d MHz\n",
 			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
@@ -1891,10 +1892,15 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
 	intel_runtime_pm_get(dev_priv);
 
+	mutex_lock(&dev_priv->psr.lock);
 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
-	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
+	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
 	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
+	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
+		   dev_priv->psr.busy_frontbuffer_bits);
+	seq_printf(m, "Re-enable work scheduled: %s\n",
+		   yesno(work_busy(&dev_priv->psr.work.work)));
 
 	enabled = HAS_PSR(dev) &&
 		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
@@ -1904,6 +1910,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
 			EDP_PSR_PERF_CNT_MASK;
 	seq_printf(m, "Performance_Counter: %u\n", psrperf);
+	mutex_unlock(&dev_priv->psr.lock);
 
 	intel_runtime_pm_put(dev_priv);
 	return 0;
@@ -1989,7 +1996,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
 
 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
 	seq_printf(m, "IRQs disabled: %s\n",
-		   yesno(dev_priv->pm.irqs_disabled));
+		   yesno(!intel_irqs_enabled(dev_priv)));
 
 	return 0;
 }
@@ -3284,7 +3291,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		return -ENODEV;
 
 	return single_open(file, pri_wm_latency_show, dev);
@@ -3294,7 +3301,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		return -ENODEV;
 
 	return single_open(file, spr_wm_latency_show, dev);
@@ -3304,7 +3311,7 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
 		return -ENODEV;
 
 	return single_open(file, cur_wm_latency_show, dev);
@@ -3656,8 +3663,8 @@ i915_max_freq_set(void *data, u64 val)
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
 
-		hw_max = valleyview_rps_max_freq(dev_priv);
-		hw_min = valleyview_rps_min_freq(dev_priv);
+		hw_max = dev_priv->rps.max_freq;
+		hw_min = dev_priv->rps.min_freq;
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 
@@ -3737,8 +3744,8 @@ i915_min_freq_set(void *data, u64 val)
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
 
-		hw_max = valleyview_rps_max_freq(dev_priv);
-		hw_min = valleyview_rps_min_freq(dev_priv);
+		hw_max = dev_priv->rps.max_freq;
+		hw_min = dev_priv->rps.min_freq;
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
 
@@ -1340,6 +1340,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_gem_stolen;
 
+	dev_priv->pm._irqs_disabled = false;
+
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
@@ -1424,15 +1426,16 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 }
 
 #if IS_ENABLED(CONFIG_FB)
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
 	struct pci_dev *pdev = dev_priv->dev->pdev;
 	bool primary;
+	int ret;
 
 	ap = alloc_apertures(1);
 	if (!ap)
-		return;
+		return -ENOMEM;
 
 	ap->ranges[0].base = dev_priv->gtt.mappable_base;
 	ap->ranges[0].size = dev_priv->gtt.mappable_end;
@@ -1440,13 +1443,16 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 
-	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
 
 	kfree(ap);
+
+	return ret;
 }
 #else
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
+	return 0;
 }
 #endif
 
@@ -1664,7 +1670,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			goto out_gtt;
 		}
 
-		i915_kick_out_firmware_fb(dev_priv);
+		ret = i915_kick_out_firmware_fb(dev_priv);
+		if (ret) {
+			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+			goto out_gtt;
+		}
 	}
 
 	pci_set_master(dev->pdev);
@@ -303,6 +303,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -314,6 +315,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -325,6 +327,7 @@ static const struct intel_device_info intel_broadwell_gt3d_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -336,6 +339,7 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	.has_fpga_dbg = 1,
 	.has_fbc = 1,
 	GEN_DEFAULT_PIPEOFFSETS,
 	IVB_CURSOR_OFFSETS,
@@ -518,12 +522,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	/*
 	 * Disable CRTCs directly since we want to preserve sw state
-	 * for _thaw.
+	 * for _thaw. Also, power gate the CRTC power wells.
 	 */
 	drm_modeset_lock_all(dev);
-	for_each_crtc(dev, crtc) {
-		dev_priv->display.crtc_disable(crtc);
-	}
+	for_each_crtc(dev, crtc)
+		intel_crtc_control(crtc, false);
 	drm_modeset_unlock_all(dev);
 
 	intel_dp_mst_suspend(dev);
@@ -179,6 +179,10 @@ enum hpd_pin {
 	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
 		if ((intel_connector)->base.encoder == (__encoder))
 
+#define for_each_power_domain(domain, mask)				\
+	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
+		if ((1 << (domain)) & (mask))
+
 struct drm_i915_private;
 struct i915_mmu_object;
 
@@ -436,8 +440,8 @@ struct drm_i915_display_funcs {
 	void (*update_wm)(struct drm_crtc *crtc);
 	void (*update_sprite_wm)(struct drm_plane *plane,
 				 struct drm_crtc *crtc,
-				 uint32_t sprite_width, int pixel_size,
-				 bool enable, bool scaled);
+				 uint32_t sprite_width, uint32_t sprite_height,
+				 int pixel_size, bool enable, bool scaled);
 	void (*modeset_global_resources)(struct drm_device *dev);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
@@ -654,13 +658,15 @@ struct i915_drrs {
 	struct intel_connector *connector;
 };
 
+struct intel_dp;
 struct i915_psr {
+	struct mutex lock;
 	bool sink_support;
 	bool source_ok;
-	bool setup_done;
-	bool enabled;
+	struct intel_dp *enabled;
 	bool active;
 	struct delayed_work work;
+	unsigned busy_frontbuffer_bits;
 };
 
 enum intel_pch {
@@ -931,6 +937,7 @@ struct intel_gen6_power_mgmt {
 	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
 	u8 rp1_freq;		/* "less than" RP0 power/freqency */
 	u8 rp0_freq;		/* Non-overclocked max frequency. */
+	u32 cz_freq;
 
 	u32 ei_interrupt_count;
 
@@ -1263,6 +1270,7 @@ struct intel_vbt_data {
 		u16 pwm_freq_hz;
 		bool present;
 		bool active_low_pwm;
+		u8 min_brightness;	/* min_brightness/255 of max */
 	} backlight;
 
 	/* MIPI DSI */
@@ -1332,7 +1340,7 @@ struct ilk_wm_values {
  */
 struct i915_runtime_pm {
 	bool suspended;
-	bool irqs_disabled;
+	bool _irqs_disabled;
 };
 
 enum intel_pipe_crc_source {
@@ -2093,6 +2101,8 @@ struct drm_i915_cmd_table {
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+
 /* DPF == dynamic parity feature */
 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
@@ -2136,6 +2146,7 @@ struct i915_params {
 	bool disable_display;
 	bool disable_vtd_wa;
 	int use_mmio_flip;
+	bool mmio_debug;
 };
 extern struct i915_params i915 __read_mostly;
 
@@ -2692,8 +2703,6 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
-extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
-extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
@@ -2803,10 +2812,10 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
 static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
 {
-	if (HAS_PCH_SPLIT(dev))
-		return CPU_VGACNTRL;
-	else if (IS_VALLEYVIEW(dev))
+	if (IS_VALLEYVIEW(dev))
 		return VLV_VGACNTRL;
+	else if (INTEL_INFO(dev)->gen >= 5)
+		return CPU_VGACNTRL;
 	else
 		return VGACNTRL;
 }
@@ -1161,7 +1161,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 	unsigned long timeout_expire;
 	int ret;
 
-	WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
+	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
@@ -2927,8 +2927,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	vma->unbind_vma(vma);
 
-	i915_gem_gtt_finish_object(obj);
-
 	list_del_init(&vma->mm_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
 	if (i915_is_ggtt(vma->vm))
@@ -2939,8 +2937,10 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
-	if (list_empty(&obj->vma_list))
+	if (list_empty(&obj->vma_list)) {
+		i915_gem_gtt_finish_object(obj);
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+	}
 
 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be
@@ -5194,8 +5194,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	bool was_interruptible;
 	bool unlock;
 
-	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
 		schedule_timeout_killable(1);
+		if (fatal_signal_pending(current))
+			return NOTIFY_DONE;
+	}
 	if (timeout == 0) {
 		pr_err("Unable to purge GPU memory due lock contention.\n");
 		return NOTIFY_DONE;
@@ -186,14 +186,12 @@ void i915_gem_context_free(struct kref *ctx_ref)
 		/* We refcount even the aliasing PPGTT to keep the code symmetric */
 		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
 			ppgtt = ctx_to_ppgtt(ctx);
-
-		/* XXX: Free up the object before tearing down the address space, in
-		 * case we're bound in the PPGTT */
-		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	}
 
 	if (ppgtt)
 		kref_put(&ppgtt->ref, ppgtt_release);
+	if (ctx->legacy_hw_ctx.rcs_state)
+		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	list_del(&ctx->link);
 	kfree(ctx);
 }
@@ -64,7 +64,8 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 #endif
 
 	/* Early VLV doesn't have this */
-	if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
+	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+	    dev->pdev->revision < 0xb) {
 		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
 		return 0;
 	}
@@ -40,19 +40,87 @@ struct i915_mmu_notifier {
 	struct hlist_node node;
 	struct mmu_notifier mn;
 	struct rb_root objects;
+	struct list_head linear;
 	struct drm_device *dev;
 	struct mm_struct *mm;
 	struct work_struct work;
 	unsigned long count;
 	unsigned long serial;
+	bool has_linear;
 };
 
 struct i915_mmu_object {
 	struct i915_mmu_notifier *mmu;
 	struct interval_tree_node it;
+	struct list_head link;
 	struct drm_i915_gem_object *obj;
+	bool is_linear;
 };
 
+static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	unsigned long end;
+
+	mutex_lock(&dev->struct_mutex);
+	/* Cancel any active worker and force us to re-evaluate gup */
+	obj->userptr.work = NULL;
+
+	if (obj->pages != NULL) {
+		struct drm_i915_private *dev_priv = to_i915(dev);
+		struct i915_vma *vma, *tmp;
+		bool was_interruptible;
+
+		was_interruptible = dev_priv->mm.interruptible;
+		dev_priv->mm.interruptible = false;
+
+		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+			int ret = i915_vma_unbind(vma);
+			WARN_ON(ret && ret != -EIO);
+		}
+		WARN_ON(i915_gem_object_put_pages(obj));
+
+		dev_priv->mm.interruptible = was_interruptible;
+	}
+
+	end = obj->userptr.ptr + obj->base.size;
+
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+
+	return end;
+}
+
+static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
+				      struct mm_struct *mm,
+				      unsigned long start,
+				      unsigned long end)
+{
+	struct i915_mmu_object *mmu;
+	unsigned long serial;
+
+restart:
+	serial = mn->serial;
+	list_for_each_entry(mmu, &mn->linear, link) {
+		struct drm_i915_gem_object *obj;
+
+		if (mmu->it.last < start || mmu->it.start > end)
+			continue;
+
+		obj = mmu->obj;
+		drm_gem_object_reference(&obj->base);
+		spin_unlock(&mn->lock);
+
+		cancel_userptr(obj);
+
+		spin_lock(&mn->lock);
+		if (serial != mn->serial)
+			goto restart;
+	}
+
+	return NULL;
+}
+
 static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 						       struct mm_struct *mm,
 						       unsigned long start,
@@ -60,16 +128,18 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 {
 	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
 	struct interval_tree_node *it = NULL;
+	unsigned long next = start;
 	unsigned long serial = 0;
 
 	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
-	while (start < end) {
-		struct drm_i915_gem_object *obj;
+	while (next < end) {
+		struct drm_i915_gem_object *obj = NULL;
 
-		obj = NULL;
 		spin_lock(&mn->lock);
-		if (serial == mn->serial)
-			it = interval_tree_iter_next(it, start, end);
+		if (mn->has_linear)
+			it = invalidate_range__linear(mn, mm, start, end);
+		else if (serial == mn->serial)
+			it = interval_tree_iter_next(it, next, end);
 		else
 			it = interval_tree_iter_first(&mn->objects, start, end);
 		if (it != NULL) {
@@ -81,31 +151,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		if (obj == NULL)
 			return;
 
-		mutex_lock(&mn->dev->struct_mutex);
-		/* Cancel any active worker and force us to re-evaluate gup */
-		obj->userptr.work = NULL;
-
-		if (obj->pages != NULL) {
-			struct drm_i915_private *dev_priv = to_i915(mn->dev);
-			struct i915_vma *vma, *tmp;
-			bool was_interruptible;
-
-			was_interruptible = dev_priv->mm.interruptible;
-			dev_priv->mm.interruptible = false;
-
-			list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
-				int ret = i915_vma_unbind(vma);
-				WARN_ON(ret && ret != -EIO);
-			}
-			WARN_ON(i915_gem_object_put_pages(obj));
-
-			dev_priv->mm.interruptible = was_interruptible;
-		}
-
-		start = obj->userptr.ptr + obj->base.size;
-
-		drm_gem_object_unreference(&obj->base);
-		mutex_unlock(&mn->dev->struct_mutex);
+		next = cancel_userptr(obj);
 	}
 }
 
@@ -150,7 +196,9 @@ i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
 	mmu->mm = mm;
 	mmu->objects = RB_ROOT;
 	mmu->count = 0;
-	mmu->serial = 0;
+	mmu->serial = 1;
+	INIT_LIST_HEAD(&mmu->linear);
+	mmu->has_linear = false;
 
 	/* Protected by mmap_sem (write-lock) */
 	ret = __mmu_notifier_register(&mmu->mn, mm);
@@ -197,6 +245,17 @@ static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
 		mmu->serial = 1;
 }
 
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
+{
+	struct i915_mmu_object *mn;
+
+	list_for_each_entry(mn, &mmu->linear, link)
+		if (mn->is_linear)
+			return true;
+
+	return false;
+}
+
 static void
 i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
 		      struct i915_mmu_object *mn)
@@ -204,7 +263,11 @@ i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
 	lockdep_assert_held(&mmu->dev->struct_mutex);
 
 	spin_lock(&mmu->lock);
-	interval_tree_remove(&mn->it, &mmu->objects);
+	list_del(&mn->link);
+	if (mn->is_linear)
+		mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
+	else
+		interval_tree_remove(&mn->it, &mmu->objects);
 	__i915_mmu_notifier_update_serial(mmu);
 	spin_unlock(&mmu->lock);
 
@@ -230,7 +293,6 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 	 */
 	i915_gem_retire_requests(mmu->dev);
 
-	/* Disallow overlapping userptr objects */
 	spin_lock(&mmu->lock);
 	it = interval_tree_iter_first(&mmu->objects,
 				      mn->it.start, mn->it.last);
@@ -243,14 +305,22 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
 		 * to flush their object references upon which the object will
 		 * be removed from the interval-tree, or the the range is
		 * still in use by another client and the overlap is invalid.
+		 *
+		 * If we do have an overlap, we cannot use the interval tree
+		 * for fast range invalidation.
 		 */
 
 		obj = container_of(it, struct i915_mmu_object, it)->obj;
-		ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
-	} else {
+		if (!obj->userptr.workers)
+			mmu->has_linear = mn->is_linear = true;
+		else
+			ret = -EAGAIN;
+	} else
 		interval_tree_insert(&mn->it, &mmu->objects);
+
+	if (ret == 0) {
+		list_add(&mn->link, &mmu->linear);
 		__i915_mmu_notifier_update_serial(mmu);
-		ret = 0;
 	}
 	spin_unlock(&mmu->lock);
 	mutex_unlock(&mmu->dev->struct_mutex);
@@ -611,12 +681,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
-* 2. It cannot overlap any other userptr object in the same address space.
-* 3. It must be normal system memory, not a pointer into another map of IO
+* 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
-* 4. We only allow a bo as large as we could in theory map into the GTT,
+* 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
-* 5. The bo is marked as being snoopable. The backing pages are left
+* 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
@@ -764,7 +764,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 struct intel_engine_cs *ring,
 struct drm_i915_error_ring *ering)
 {
-struct intel_engine_cs *useless;
+struct intel_engine_cs *to;
 int i;
 
 if (!i915_semaphore_is_enabled(dev_priv->dev))
@@ -776,13 +776,20 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 dev_priv->semaphore_obj,
 &dev_priv->gtt.base);
 
-for_each_ring(useless, dev_priv, i) {
-u16 signal_offset =
-(GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
-u32 *tmp = error->semaphore_obj->pages[0];
+for_each_ring(to, dev_priv, i) {
+int idx;
+u16 signal_offset;
+u32 *tmp;
 
-ering->semaphore_mboxes[i] = tmp[signal_offset];
-ering->semaphore_seqno[i] = ring->semaphore.sync_seqno[i];
+if (ring == to)
+continue;
+
+signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
+tmp = error->semaphore_obj->pages[0];
+idx = intel_ring_sync_index(ring, to);
+
+ering->semaphore_mboxes[idx] = tmp[signal_offset];
+ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
 }
 }
 
@@ -920,6 +927,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring,
 return;
 
 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+if (!i915_gem_obj_ggtt_bound(obj))
+continue;
+
 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
 ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
 break;
@@ -136,7 +136,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 assert_spin_locked(&dev_priv->irq_lock);
 
-if (WARN_ON(dev_priv->pm.irqs_disabled))
+if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 return;
 
 if ((dev_priv->irq_mask & mask) != 0) {
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
 {
 assert_spin_locked(&dev_priv->irq_lock);
 
-if (WARN_ON(dev_priv->pm.irqs_disabled))
+if (!intel_irqs_enabled(dev_priv))
 return;
 
 if ((dev_priv->irq_mask & mask) != mask) {
@@ -173,7 +173,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
 {
 assert_spin_locked(&dev_priv->irq_lock);
 
-if (WARN_ON(dev_priv->pm.irqs_disabled))
+if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 return;
 
 dev_priv->gt_irq_mask &= ~interrupt_mask;
@@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
 POSTING_READ(GTIMR);
 }
 
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 ilk_update_gt_irq(dev_priv, mask, mask);
 }
 
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 ilk_update_gt_irq(dev_priv, mask, 0);
 }
@@ -206,7 +206,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 
 assert_spin_locked(&dev_priv->irq_lock);
 
-if (WARN_ON(dev_priv->pm.irqs_disabled))
+if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 return;
 
 new_val = dev_priv->pm_irq_mask;
@@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 }
 }
 
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 snb_update_pm_irq(dev_priv, mask, mask);
 }
 
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 snb_update_pm_irq(dev_priv, mask, 0);
 }
@@ -264,7 +264,7 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
 
 assert_spin_locked(&dev_priv->irq_lock);
 
-if (WARN_ON(dev_priv->pm.irqs_disabled))
+if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 return;
 
 new_val = dev_priv->pm_irq_mask;
@@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
 }
 }
 
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 bdw_update_pm_irq(dev_priv, mask, mask);
 }
 
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 bdw_update_pm_irq(dev_priv, mask, 0);
 }
@@ -420,7 +420,7 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 
 assert_spin_locked(&dev_priv->irq_lock);
 
-if (WARN_ON(dev_priv->pm.irqs_disabled))
+if (WARN_ON(!intel_irqs_enabled(dev_priv)))
 return;
 
 I915_WRITE(SDEIMR, sdeimr);
@@ -1407,11 +1407,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
 spin_lock_irq(&dev_priv->irq_lock);
 pm_iir = dev_priv->rps.pm_iir;
 dev_priv->rps.pm_iir = 0;
-if (IS_BROADWELL(dev_priv->dev))
-bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+if (INTEL_INFO(dev_priv->dev)->gen >= 8)
+gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 else {
 /* Make sure not to corrupt PMIMR state used by ringbuffer */
-snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 }
 spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -1553,7 +1553,7 @@ static void ivybridge_parity_work(struct work_struct *work)
 out:
 WARN_ON(dev_priv->l3_parity.which_slice);
 spin_lock_irqsave(&dev_priv->irq_lock, flags);
-ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1567,7 +1567,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
 return;
 
 spin_lock(&dev_priv->irq_lock);
-ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
 spin_unlock(&dev_priv->irq_lock);
 
 iir &= GT_PARITY_ERROR(dev);
@@ -1622,7 +1622,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
 spin_lock(&dev_priv->irq_lock);
 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
 spin_unlock(&dev_priv->irq_lock);
 
 queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1969,7 +1969,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 if (pm_iir & dev_priv->pm_rps_events) {
 spin_lock(&dev_priv->irq_lock);
 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
 spin_unlock(&dev_priv->irq_lock);
 
 queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -3467,7 +3467,9 @@ static void gen8_irq_reset(struct drm_device *dev)
 gen8_gt_irq_reset(dev_priv);
 
 for_each_pipe(pipe)
-GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+if (intel_display_power_enabled(dev_priv,
+POWER_DOMAIN_PIPE(pipe)))
+GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
 
 GEN5_IRQ_RESET(GEN8_DE_PORT_);
 GEN5_IRQ_RESET(GEN8_DE_MISC_);
@@ -3476,6 +3478,18 @@ static void gen8_irq_reset(struct drm_device *dev)
 ibx_irq_reset(dev);
 }
 
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+unsigned long irqflags;
+
+spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
+~dev_priv->de_irq_mask[PIPE_B]);
+GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
+~dev_priv->de_irq_mask[PIPE_C]);
+spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
 static void cherryview_irq_preinstall(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3800,8 +3814,11 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
 
 for_each_pipe(pipe)
-GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
-de_pipe_enables);
+if (intel_display_power_enabled(dev_priv,
+POWER_DOMAIN_PIPE(pipe)))
+GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+dev_priv->de_irq_mask[pipe],
+de_pipe_enables);
 
 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
 }
@@ -4652,6 +4669,9 @@ void intel_irq_init(struct drm_device *dev)
 
 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
+/* Haven't installed the IRQ handler yet */
+dev_priv->pm._irqs_disabled = true;
+
 if (IS_GEN2(dev)) {
 dev->max_vblank_count = 0;
 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4759,7 +4779,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
 struct drm_i915_private *dev_priv = dev->dev_private;
 
 dev->driver->irq_uninstall(dev);
-dev_priv->pm.irqs_disabled = true;
+dev_priv->pm._irqs_disabled = true;
 }
 
 /* Restore interrupts so we can recover from runtime PM. */
@@ -4767,7 +4787,7 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 
-dev_priv->pm.irqs_disabled = false;
+dev_priv->pm._irqs_disabled = false;
 dev->driver->irq_preinstall(dev);
 dev->driver->irq_postinstall(dev);
 }
@@ -37,7 +37,7 @@ struct i915_params i915 __read_mostly = {
 .enable_fbc = -1,
 .enable_hangcheck = true,
 .enable_ppgtt = -1,
-.enable_psr = 0,
+.enable_psr = 1,
 .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
 .disable_power_well = 1,
 .enable_ips = 1,
@@ -49,6 +49,7 @@ struct i915_params i915 __read_mostly = {
 .enable_cmd_parser = 1,
 .disable_vtd_wa = 0,
 .use_mmio_flip = 0,
+.mmio_debug = 0,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
@@ -118,7 +119,7 @@ MODULE_PARM_DESC(enable_ppgtt,
 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
 
 module_param_named(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: true)");
 
 module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
@@ -161,3 +162,8 @@ MODULE_PARM_DESC(enable_cmd_parser,
 module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
 MODULE_PARM_DESC(use_mmio_flip,
 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
+
+module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
+MODULE_PARM_DESC(mmio_debug,
+"Enable the MMIO debug code (default: false). This may negatively "
+"affect performance.");
@@ -2281,7 +2281,7 @@ enum punit_power_well {
 /* Same as Haswell, but 72064 bytes now. */
 #define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
 
+#define CHV_CLK_CTL1 0x101100
 #define VLV_CLK_CTL2 0x101104
 #define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
 
@@ -5538,6 +5538,12 @@ enum punit_power_well {
 GEN6_PM_RP_DOWN_THRESHOLD | \
 GEN6_PM_RP_DOWN_TIMEOUT)
 
+#define CHV_CZ_CLOCK_FREQ_MODE_200 200
+#define CHV_CZ_CLOCK_FREQ_MODE_267 267
+#define CHV_CZ_CLOCK_FREQ_MODE_320 320
+#define CHV_CZ_CLOCK_FREQ_MODE_333 333
+#define CHV_CZ_CLOCK_FREQ_MODE_400 400
+
 #define GEN7_GT_SCRATCH_BASE 0x4F100
 #define GEN7_GT_SCRATCH_REG_NUM 8
 
@@ -47,22 +47,45 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
 
 intel_runtime_pm_get(dev_priv);
 
-/* On VLV, residency time is in CZ units rather than 1.28us */
+/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
 if (IS_VALLEYVIEW(dev)) {
-u32 clkctl2;
+u32 reg, czcount_30ns;
 
-clkctl2 = I915_READ(VLV_CLK_CTL2) >>
-CLK_CTL2_CZCOUNT_30NS_SHIFT;
-if (!clkctl2) {
-WARN(!clkctl2, "bogus CZ count value");
+if (IS_CHERRYVIEW(dev))
+reg = CHV_CLK_CTL1;
+else
+reg = VLV_CLK_CTL2;
+
+czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
+
+if (!czcount_30ns) {
+WARN(!czcount_30ns, "bogus CZ count value");
 ret = 0;
 goto out;
 }
-units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+
+units = 0;
+div = 1000000ULL;
+
+if (IS_CHERRYVIEW(dev)) {
+/* Special case for 320Mhz */
+if (czcount_30ns == 1) {
+div = 10000000ULL;
+units = 3125ULL;
+} else {
+/* chv counts are one less */
+czcount_30ns += 1;
+}
+}
+
+if (units == 0)
+units = DIV_ROUND_UP_ULL(30ULL * bias,
+(u64)czcount_30ns);
+
 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
 units <<= 8;
 
-div = 1000000ULL * bias;
+div = div * bias;
 }
 
 raw_time = I915_READ(reg) * units;
@@ -461,11 +484,20 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 mutex_unlock(&dev->struct_mutex);
 
 if (attr == &dev_attr_gt_RP0_freq_mhz) {
-val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+if (IS_VALLEYVIEW(dev))
+val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+else
+val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
 } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
-val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+if (IS_VALLEYVIEW(dev))
+val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+else
+val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
 } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
-val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+if (IS_VALLEYVIEW(dev))
+val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+else
+val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
 } else {
 BUG();
 }
@@ -486,6 +518,9 @@ static const struct attribute *vlv_attrs[] = {
 &dev_attr_gt_cur_freq_mhz.attr,
 &dev_attr_gt_max_freq_mhz.attr,
 &dev_attr_gt_min_freq_mhz.attr,
+&dev_attr_gt_RP0_freq_mhz.attr,
+&dev_attr_gt_RP1_freq_mhz.attr,
+&dev_attr_gt_RPn_freq_mhz.attr,
 &dev_attr_vlv_rpe_freq_mhz.attr,
 NULL,
 };
@@ -336,11 +336,12 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
 DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
 "active %s, min brightness %u, level %u\n",
 dev_priv->vbt.backlight.pwm_freq_hz,
 dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
-entry->min_brightness,
+dev_priv->vbt.backlight.min_brightness,
 backlight_data->level[panel_type]);
 }
 
@@ -660,8 +660,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 struct intel_load_detect_pipe tmp;
 struct drm_modeset_acquire_ctx ctx;
 
-intel_runtime_pm_get(dev_priv);
-
 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
 connector->base.id, connector->name,
 force);
@@ -713,8 +711,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 
 out:
 intel_display_power_put(dev_priv, power_domain);
-intel_runtime_pm_put(dev_priv);
-
 return status;
 }
 
@@ -1243,7 +1243,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 return val & WRPLL_PLL_ENABLE;
 }
 
-static char *hsw_ddi_pll_names[] = {
+static const char * const hsw_ddi_pll_names[] = {
 "WRPLL 1",
 "WRPLL 2",
 };
@@ -3855,7 +3855,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
 }
 
 /* use legacy palette for Ironlake */
-if (HAS_PCH_SPLIT(dev))
+if (!HAS_GMCH_DISPLAY(dev))
 palreg = LGC_PALETTE(pipe);
 
 /* Workaround : Do not read or write the pipe palette/gamma data while
@@ -4894,35 +4894,21 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
 }
 }
 
-/**
-* Sets the power management mode of the pipe and plane.
-*/
-void intel_crtc_update_dpms(struct drm_crtc *crtc)
+/* Master function to enable/disable CRTC and corresponding power wells */
+void intel_crtc_control(struct drm_crtc *crtc, bool enable)
 {
 struct drm_device *dev = crtc->dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-struct intel_encoder *intel_encoder;
 enum intel_display_power_domain domain;
 unsigned long domains;
-bool enable = false;
-
-for_each_encoder_on_crtc(dev, crtc, intel_encoder)
-enable |= intel_encoder->connectors_active;
 
 if (enable) {
 if (!intel_crtc->active) {
-/*
-* FIXME: DDI plls and relevant code isn't converted
-* yet, so do runtime PM for DPMS only for all other
-* platforms for now.
-*/
-if (!HAS_DDI(dev)) {
-domains = get_crtc_power_domains(crtc);
-for_each_power_domain(domain, domains)
-intel_display_power_get(dev_priv, domain);
-intel_crtc->enabled_power_domains = domains;
-}
+domains = get_crtc_power_domains(crtc);
+for_each_power_domain(domain, domains)
+intel_display_power_get(dev_priv, domain);
+intel_crtc->enabled_power_domains = domains;
 
 dev_priv->display.crtc_enable(crtc);
 }
@@ -4930,14 +4916,27 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
 if (intel_crtc->active) {
 dev_priv->display.crtc_disable(crtc);
 
-if (!HAS_DDI(dev)) {
-domains = intel_crtc->enabled_power_domains;
-for_each_power_domain(domain, domains)
-intel_display_power_put(dev_priv, domain);
-intel_crtc->enabled_power_domains = 0;
-}
+domains = intel_crtc->enabled_power_domains;
+for_each_power_domain(domain, domains)
+intel_display_power_put(dev_priv, domain);
+intel_crtc->enabled_power_domains = 0;
 }
 }
+}
+
+/**
+* Sets the power management mode of the pipe and plane.
+*/
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
+{
+struct drm_device *dev = crtc->dev;
+struct intel_encoder *intel_encoder;
+bool enable = false;
+
+for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+enable |= intel_encoder->connectors_active;
+
+intel_crtc_control(crtc, enable);
+
 intel_crtc_update_sarea(crtc, enable);
 }
@@ -4957,10 +4956,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 intel_crtc_update_sarea(crtc, false);
 dev_priv->display.off(crtc);
 
-assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
-assert_cursor_disabled(dev_priv, pipe);
-assert_pipe_disabled(dev->dev_private, pipe);
-
 if (crtc->primary->fb) {
 mutex_lock(&dev->struct_mutex);
 intel_unpin_fb_obj(old_obj);
@@ -7360,8 +7355,9 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
 WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
 "CPU PWM1 enabled\n");
-WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
-"CPU PWM2 enabled\n");
+if (IS_HASWELL(dev))
+WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+"CPU PWM2 enabled\n");
 WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
 "PCH PWM1 enabled\n");
 WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
@@ -7374,7 +7370,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 * gen-specific and since we only disable LCPLL after we fully disable
 * the interrupts, the check below should be enough.
 */
-WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
+WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
 }
 
 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
@@ -8817,7 +8813,7 @@ static void intel_increase_pllclock(struct drm_device *dev,
 int dpll_reg = DPLL(pipe);
 int dpll;
 
-if (HAS_PCH_SPLIT(dev))
+if (!HAS_GMCH_DISPLAY(dev))
 return;
 
 if (!dev_priv->lvds_downclock_avail)
@@ -8845,7 +8841,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-if (HAS_PCH_SPLIT(dev))
+if (!HAS_GMCH_DISPLAY(dev))
 return;
 
 if (!dev_priv->lvds_downclock_avail)
@@ -8976,7 +8972,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 
 intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
 
-intel_edp_psr_exit(dev);
+intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
 }
 
 /**
@@ -9002,7 +8998,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
 
 intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
 
-intel_edp_psr_exit(dev);
+intel_edp_psr_flush(dev, frontbuffer_bits);
 }
 
 /**
@@ -12825,6 +12821,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 encoder->base.base.id,
 encoder->base.name);
 encoder->disable(encoder);
+if (encoder->post_disable)
+encoder->post_disable(encoder);
 }
 encoder->base.crtc = NULL;
 encoder->connectors_active = false;
@@ -13093,6 +13091,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 */
 drm_irq_uninstall(dev);
 cancel_work_sync(&dev_priv->hotplug_work);
+dev_priv->pm._irqs_disabled = true;
+
 /*
 * Due to the hpd irq storm handling the hotplug work can re-arm the
 * poll handlers. Hence disable polling after hpd handling is shut down.
@@ -13270,7 +13270,7 @@ intel_display_capture_error_state(struct drm_device *dev)
 
 error->pipe[i].source = I915_READ(PIPESRC(i));
 
-if (!HAS_PCH_SPLIT(dev))
+if (HAS_GMCH_DISPLAY(dev))
 error->pipe[i].stat = I915_READ(PIPESTAT(i));
 }
 
@@ -1682,9 +1682,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct edp_vsc_psr psr_vsc;
 
-if (dev_priv->psr.setup_done)
-return;
-
 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
 memset(&psr_vsc, 0, sizeof(psr_vsc));
 psr_vsc.sdp_header.HB0 = 0;
@@ -1696,8 +1693,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
 /* Avoid continuous PSR exit by masking memup and hpd */
 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
-dev_priv->psr.setup_done = true;
 }
 
 static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
@@ -1768,20 +1763,17 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 struct drm_device *dev = dig_port->base.base.dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
-struct drm_crtc *crtc;
-struct intel_crtc *intel_crtc;
-struct drm_i915_gem_object *obj;
-struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+struct drm_crtc *crtc = dig_port->base.base.crtc;
+struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+lockdep_assert_held(&dev_priv->psr.lock);
+lockdep_assert_held(&dev->struct_mutex);
+WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 dev_priv->psr.source_ok = false;
 
-if (!HAS_PSR(dev)) {
-DRM_DEBUG_KMS("PSR not supported on this platform\n");
-return false;
-}
-
-if (IS_HASWELL(dev) && (intel_encoder->type != INTEL_OUTPUT_EDP ||
-dig_port->port != PORT_A)) {
+if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
 return false;
 }
@@ -1791,34 +1783,10 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 return false;
 }
 
-crtc = dig_port->base.base.crtc;
-if (crtc == NULL) {
-DRM_DEBUG_KMS("crtc not active for PSR\n");
-return false;
-}
-
-intel_crtc = to_intel_crtc(crtc);
-if (!intel_crtc_active(crtc)) {
-DRM_DEBUG_KMS("crtc not active for PSR\n");
-return false;
-}
-
-obj = intel_fb_obj(crtc->primary->fb);
-if (obj->tiling_mode != I915_TILING_X ||
-obj->fence_reg == I915_FENCE_REG_NONE) {
-DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
-return false;
-}
-
 /* Below limitations aren't valid for Broadwell */
 if (IS_BROADWELL(dev))
 goto out;
 
-if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
-DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
-return false;
-}
-
 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
 S3D_ENABLE) {
 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
@@ -1841,8 +1809,9 @@ static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
 struct drm_device *dev = intel_dig_port->base.base.dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
 
-if (intel_edp_is_psr_enabled(dev))
-return;
+WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+WARN_ON(dev_priv->psr.active);
+lockdep_assert_held(&dev_priv->psr.lock);
 
 /* Enable PSR on the panel */
 intel_edp_psr_enable_sink(intel_dp);
@@ -1850,13 +1819,13 @@ static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
 /* Enable PSR on the host */
 intel_edp_psr_enable_source(intel_dp);
 
-dev_priv->psr.enabled = true;
 dev_priv->psr.active = true;
 }
 
 void intel_edp_psr_enable(struct intel_dp *intel_dp)
 {
 struct drm_device *dev = intel_dp_to_dev(intel_dp);
+struct drm_i915_private *dev_priv = dev->dev_private;
 
 if (!HAS_PSR(dev)) {
 DRM_DEBUG_KMS("PSR not supported on this platform\n");
@@ -1868,11 +1837,21 @@ void intel_edp_psr_enable(struct intel_dp *intel_dp)
 return;
 }
 
+mutex_lock(&dev_priv->psr.lock);
+if (dev_priv->psr.enabled) {
+DRM_DEBUG_KMS("PSR already in use\n");
+mutex_unlock(&dev_priv->psr.lock);
+return;
+}
+
+dev_priv->psr.busy_frontbuffer_bits = 0;
+
 /* Setup PSR once */
 intel_edp_psr_setup(intel_dp);
 
 if (intel_edp_psr_match_conditions(intel_dp))
-intel_edp_psr_do_enable(intel_dp);
+dev_priv->psr.enabled = intel_dp;
+mutex_unlock(&dev_priv->psr.lock);
 }
 
 void intel_edp_psr_disable(struct intel_dp *intel_dp)
@@ -1880,76 +1859,136 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
 struct drm_device *dev = intel_dp_to_dev(intel_dp);
 struct drm_i915_private *dev_priv = dev->dev_private;
 
-if (!dev_priv->psr.enabled)
+mutex_lock(&dev_priv->psr.lock);
+if (!dev_priv->psr.enabled) {
+mutex_unlock(&dev_priv->psr.lock);
 return;
+}
 
-I915_WRITE(EDP_PSR_CTL(dev),
-I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+if (dev_priv->psr.active) {
+I915_WRITE(EDP_PSR_CTL(dev),
+I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
 /* Wait till PSR is idle */
 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
 DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
-dev_priv->psr.enabled = false;
+dev_priv->psr.active = false;
+} else {
+WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+}
+
+dev_priv->psr.enabled = NULL;
+mutex_unlock(&dev_priv->psr.lock);
+
+cancel_delayed_work_sync(&dev_priv->psr.work);
 }
 
 static void intel_edp_psr_work(struct work_struct *work)
 {
 struct drm_i915_private *dev_priv =
 container_of(work, typeof(*dev_priv), psr.work.work);
-struct drm_device *dev = dev_priv->dev;
-struct intel_encoder *encoder;
-struct intel_dp *intel_dp = NULL;
+struct intel_dp *intel_dp = dev_priv->psr.enabled;
 
-list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
-if (encoder->type == INTEL_OUTPUT_EDP) {
-intel_dp = enc_to_intel_dp(&encoder->base);
+mutex_lock(&dev_priv->psr.lock);
+intel_dp = dev_priv->psr.enabled;
 
-if (!intel_edp_psr_match_conditions(intel_dp))
-intel_edp_psr_disable(intel_dp);
-else
-intel_edp_psr_do_enable(intel_dp);
-}
+if (!intel_dp)
+goto unlock;
+
+/*
+* The delayed work can race with an invalidate hence we need to
+* recheck. Since psr_flush first clears this and then reschedules we
+* won't ever miss a flush when bailing out here.
+*/
+if (dev_priv->psr.busy_frontbuffer_bits)
+goto unlock;
+
+intel_edp_psr_do_enable(intel_dp);
+unlock:
+mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void intel_edp_psr_inactivate(struct drm_device *dev)
+static void intel_edp_psr_do_exit(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 
-dev_priv->psr.active = false;
+if (dev_priv->psr.active) {
+u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+WARN_ON(!(val & EDP_PSR_ENABLE));
+
+I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+dev_priv->psr.active = false;
+}
 
-I915_WRITE(EDP_PSR_CTL(dev), I915_READ(EDP_PSR_CTL(dev))
-& ~EDP_PSR_ENABLE);
 }
 
-void intel_edp_psr_exit(struct drm_device *dev)
+void intel_edp_psr_invalidate(struct drm_device *dev,
+unsigned frontbuffer_bits)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
+struct drm_crtc *crtc;
+enum pipe pipe;
 
-if (!HAS_PSR(dev))
+mutex_lock(&dev_priv->psr.lock);
+if (!dev_priv->psr.enabled) {
+mutex_unlock(&dev_priv->psr.lock);
 return;
+}
 
-if (!dev_priv->psr.setup_done)
-return;
-
-cancel_delayed_work_sync(&dev_priv->psr.work);
+crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+pipe = to_intel_crtc(crtc)->pipe;
 
-if (dev_priv->psr.active)
-intel_edp_psr_inactivate(dev);
+intel_edp_psr_do_exit(dev);
 
-schedule_delayed_work(&dev_priv->psr.work,
-msecs_to_jiffies(100));
+frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_flush(struct drm_device *dev,
+unsigned frontbuffer_bits)
+{
+struct drm_i915_private *dev_priv = dev->dev_private;
+struct drm_crtc *crtc;
+enum pipe pipe;
+
+mutex_lock(&dev_priv->psr.lock);
+if (!dev_priv->psr.enabled) {
+mutex_unlock(&dev_priv->psr.lock);
+return;
+}
+
+crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+pipe = to_intel_crtc(crtc)->pipe;
+dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+/*
+* On Haswell sprite plane updates don't result in a psr invalidating
+* signal in the hardware. Which means we need to manually fake this in
+* software for all flushes, not just when we've seen a preceding
+* invalidation through frontbuffer rendering.
+*/
+if (IS_HASWELL(dev) &&
+(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+intel_edp_psr_do_exit(dev);
+
+if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+schedule_delayed_work(&dev_priv->psr.work,
+msecs_to_jiffies(100));
+mutex_unlock(&dev_priv->psr.lock);
 }
 
 void intel_edp_psr_init(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 
-if (!HAS_PSR(dev))
-return;
-
 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
+mutex_init(&dev_priv->psr.lock);
 }
 
 static void intel_disable_dp(struct intel_encoder *encoder)
@@ -3681,8 +3720,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 struct edid *edid = NULL;
 bool ret;
 
-intel_runtime_pm_get(dev_priv);
-
 power_domain = intel_display_port_power_domain(intel_encoder);
 intel_display_power_get(dev_priv, power_domain);
 
@@ -3735,9 +3772,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 
 out:
 intel_display_power_put(dev_priv, power_domain);
-
-intel_runtime_pm_put(dev_priv);
-
 return status;
 }
 
@@ -4262,6 +4296,11 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 return;
 }
 
+/*
+* FIXME: This needs proper synchronization with psr state. But really
+* hard to tell without seeing the user of this function of this code.
+* Check locking and ordering once that lands.
+*/
 if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
 return;
@@ -166,6 +166,7 @@ struct intel_panel {
 struct {
 bool present;
 u32 level;
+u32 min;
 u32 max;
 bool enabled;
 bool combination_mode; /* gen 2/4 only */
@@ -431,6 +432,7 @@ struct intel_crtc {
 
 struct intel_plane_wm_parameters {
 uint32_t horiz_pixels;
+uint32_t vert_pixels;
 uint8_t bytes_per_pixel;
 bool enabled;
 bool scaled;
@@ -506,6 +508,7 @@ struct intel_hdmi {
 bool has_audio;
 enum hdmi_force_audio force_audio;
 bool rgb_quant_range_selectable;
+enum hdmi_picture_aspect aspect_ratio;
 void (*write_infoframe)(struct drm_encoder *encoder,
 enum hdmi_infoframe_type type,
 const void *frame, ssize_t len);
@@ -711,17 +714,26 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
 enum transcoder pch_transcoder,
 bool enable);
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
 void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+/*
+* We only use drm_irq_uninstall() at unload and VT switch, so
+* this is the only thing we need to check.
+*/
+return !dev_priv->pm._irqs_disabled;
+}
+
 int intel_get_crtc_scanline(struct intel_crtc *crtc);
 void i9xx_check_fifo_underruns(struct drm_device *dev);
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
 
 /* intel_crt.c */
 void intel_crt_init(struct drm_device *dev);
@@ -787,6 +799,7 @@ void intel_frontbuffer_flip(struct drm_device *dev,
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_control(struct drm_crtc *crtc, bool enable);
 void intel_crtc_update_dpms(struct drm_crtc *crtc);
 void intel_encoder_destroy(struct drm_encoder *encoder);
 void intel_connector_dpms(struct drm_connector *, int mode);
@@ -901,7 +914,10 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
 void intel_edp_psr_enable(struct intel_dp *intel_dp);
 void intel_edp_psr_disable(struct intel_dp *intel_dp);
 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-void intel_edp_psr_exit(struct drm_device *dev);
+void intel_edp_psr_invalidate(struct drm_device *dev,
+unsigned frontbuffer_bits);
+void intel_edp_psr_flush(struct drm_device *dev,
+unsigned frontbuffer_bits);
 void intel_edp_psr_init(struct drm_device *dev);
 
 int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
@@ -997,8 +1013,8 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
 void intel_gmch_panel_fitting(struct intel_crtc *crtc,
 struct intel_crtc_config *pipe_config,
 int fitting_mode);
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
-u32 max);
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+u32 level, u32 max);
 int intel_panel_setup_backlight(struct drm_connector *connector);
 void intel_panel_enable_backlight(struct intel_connector *connector);
 void intel_panel_disable_backlight(struct intel_connector *connector);
@@ -1017,7 +1033,9 @@ int ilk_wm_max_level(const struct drm_device *dev);
 void intel_update_watermarks(struct drm_crtc *crtc);
 void intel_update_sprite_watermarks(struct drm_plane *plane,
 struct drm_crtc *crtc,
-uint32_t sprite_width, int pixel_size,
+uint32_t sprite_width,
+uint32_t sprite_height,
+int pixel_size,
 bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
 void intel_pm_setup(struct drm_device *dev);
@@ -367,6 +367,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
	union hdmi_infoframe frame;
	int ret;

+	/* Set user selected PAR to incoming mode's member */
+	adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+
	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
						       adjusted_mode);
	if (ret < 0) {
@@ -879,7 +882,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
	struct intel_encoder *encoder;
	int count = 0, count_hdmi = 0;

-	if (!HAS_PCH_SPLIT(dev))
+	if (HAS_GMCH_DISPLAY(dev))
		return false;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
@@ -1124,6 +1127,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
		goto done;
	}

+	if (property == connector->dev->mode_config.aspect_ratio_property) {
+		switch (val) {
+		case DRM_MODE_PICTURE_ASPECT_NONE:
+			intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+			break;
+		case DRM_MODE_PICTURE_ASPECT_4_3:
+			intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
+			break;
+		case DRM_MODE_PICTURE_ASPECT_16_9:
+			intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
+			break;
+		default:
+			return -EINVAL;
+		}
+		goto done;
+	}
+
	return -EINVAL;

done:
@@ -1479,12 +1499,23 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
	.destroy = intel_encoder_destroy,
 };

+static void
+intel_attach_aspect_ratio_property(struct drm_connector *connector)
+{
+	if (!drm_mode_create_aspect_ratio_property(connector->dev))
+		drm_object_attach_property(&connector->base,
+			connector->dev->mode_config.aspect_ratio_property,
+			DRM_MODE_PICTURE_ASPECT_NONE);
+}
+
 static void
 intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
 {
	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_hdmi->color_range_auto = true;
+	intel_attach_aspect_ratio_property(connector);
+	intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 }

 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1531,7 +1562,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
	if (IS_VALLEYVIEW(dev)) {
		intel_hdmi->write_infoframe = vlv_write_infoframe;
		intel_hdmi->set_infoframes = vlv_set_infoframes;
-	} else if (!HAS_PCH_SPLIT(dev)) {
+	} else if (IS_G4X(dev)) {
		intel_hdmi->write_infoframe = g4x_write_infoframe;
		intel_hdmi->set_infoframes = g4x_set_infoframes;
	} else if (HAS_DDI(dev)) {

@@ -419,7 +419,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
	 */
	DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
	list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
-		intel_panel_set_backlight(intel_connector, bclp, 255);
+		intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);

	drm_modeset_unlock(&dev->mode_config.connection_mutex);

@@ -398,6 +398,69 @@ intel_panel_detect(struct drm_device *dev)
	}
 }

+/**
+ * scale - scale values from one range to another
+ *
+ * @source_val: value in range [@source_min..@source_max]
+ *
+ * Return @source_val in range [@source_min..@source_max] scaled to range
+ * [@target_min..@target_max].
+ */
+static uint32_t scale(uint32_t source_val,
+		      uint32_t source_min, uint32_t source_max,
+		      uint32_t target_min, uint32_t target_max)
+{
+	uint64_t target_val;
+
+	WARN_ON(source_min > source_max);
+	WARN_ON(target_min > target_max);
+
+	/* defensive */
+	source_val = clamp(source_val, source_min, source_max);
+
+	/* avoid overflows */
+	target_val = (uint64_t)(source_val - source_min) *
+		(target_max - target_min);
+	do_div(target_val, source_max - source_min);
+	target_val += target_min;
+
+	return target_val;
+}
+
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static inline u32 scale_user_to_hw(struct intel_connector *connector,
+				   u32 user_level, u32 user_max)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	return scale(user_level, 0, user_max,
+		     panel->backlight.min, panel->backlight.max);
+}
+
+/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
+ * to [hw_min..hw_max]. */
+static inline u32 clamp_user_to_hw(struct intel_connector *connector,
+				   u32 user_level, u32 user_max)
+{
+	struct intel_panel *panel = &connector->panel;
+	u32 hw_level;
+
+	hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
+	hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
+
+	return hw_level;
+}
+
+/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
+static inline u32 scale_hw_to_user(struct intel_connector *connector,
+				   u32 hw_level, u32 user_max)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	return scale(hw_level, panel->backlight.min, panel->backlight.max,
+		     0, user_max);
+}
+
 static u32 intel_panel_compute_brightness(struct intel_connector *connector,
					  u32 val)
 {
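For reference, the scale() helper added in the hunk above is plain linear rescaling with a 64-bit intermediate so the multiply cannot overflow, and scale_user_to_hw()/scale_hw_to_user() are just that helper applied between the user range and the hardware PWM range. A minimal userspace sketch of the same arithmetic follows; the hw_min/hw_max/user_max values are made up for illustration and are not taken from any particular VBT or panel.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel helper: clamp, then rescale via u64. */
static uint32_t scale(uint32_t val, uint32_t smin, uint32_t smax,
                      uint32_t tmin, uint32_t tmax)
{
        if (val < smin)                 /* defensive clamp */
                val = smin;
        if (val > smax)
                val = smax;
        return (uint32_t)((uint64_t)(val - smin) * (tmax - tmin) /
                          (smax - smin)) + tmin;
}

int main(void)
{
        const uint32_t hw_min = 26, hw_max = 937;   /* illustrative PWM limits */
        const uint32_t user_max = 255;              /* illustrative user range */

        /* user 0 maps to hw_min, user 255 maps to hw_max */
        for (uint32_t user = 0; user <= user_max; user += 85) {
                uint32_t hw = scale(user, 0, user_max, hw_min, hw_max);
                printf("user %3u -> hw %3u -> user %3u\n",
                       user, hw, scale(hw, hw_min, hw_max, 0, user_max));
        }
        return 0;
}

The round trip is not exactly the identity because the division truncates, which is also true of the kernel version; only the endpoints are guaranteed to map exactly.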
@@ -557,17 +620,16 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
		dev_priv->display.set_backlight(connector, level);
 }

-/* set backlight brightness to level in range [0..max] */
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
-			       u32 max)
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(struct intel_connector *connector,
+				      u32 user_level, u32 user_max)
 {
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_panel *panel = &connector->panel;
	enum pipe pipe = intel_get_pipe_from_connector(connector);
-	u32 freq;
+	u32 hw_level;
	unsigned long flags;
-	u64 n;

	if (!panel->backlight.present || pipe == INVALID_PIPE)
		return;
@@ -576,18 +638,46 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,

	WARN_ON(panel->backlight.max == 0);

-	/* scale to hardware max, but be careful to not overflow */
-	freq = panel->backlight.max;
-	n = (u64)level * freq;
-	do_div(n, max);
-	level = n;
-
-	panel->backlight.level = level;
-	if (panel->backlight.device)
-		panel->backlight.device->props.brightness = level;
+	hw_level = scale_user_to_hw(connector, user_level, user_max);
+	panel->backlight.level = hw_level;

	if (panel->backlight.enabled)
-		intel_panel_actually_set_backlight(connector, level);
+		intel_panel_actually_set_backlight(connector, hw_level);
+
+	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+/* set backlight brightness to level in range [0..max], assuming hw min is
+ * respected.
+ */
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+				    u32 user_level, u32 user_max)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
+	u32 hw_level;
+	unsigned long flags;
+
+	if (!panel->backlight.present || pipe == INVALID_PIPE)
+		return;
+
+	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+	WARN_ON(panel->backlight.max == 0);
+
+	hw_level = clamp_user_to_hw(connector, user_level, user_max);
+	panel->backlight.level = hw_level;
+
+	if (panel->backlight.device)
+		panel->backlight.device->props.brightness =
+			scale_hw_to_user(connector,
+					 panel->backlight.level,
+					 panel->backlight.device->props.max_brightness);
+
+	if (panel->backlight.enabled)
+		intel_panel_actually_set_backlight(connector, hw_level);

	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
 }
@@ -860,7 +950,9 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
		panel->backlight.level = panel->backlight.max;
		if (panel->backlight.device)
			panel->backlight.device->props.brightness =
-				panel->backlight.level;
+				scale_hw_to_user(connector,
+						 panel->backlight.level,
+						 panel->backlight.device->props.max_brightness);
	}

	dev_priv->display.enable_backlight(connector);
@@ -889,11 +981,15 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
	struct intel_connector *connector = bl_get_data(bd);
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hw_level;
	int ret;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	ret = intel_panel_get_backlight(connector);
+
+	hw_level = intel_panel_get_backlight(connector);
+	ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
+
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	intel_runtime_pm_put(dev_priv);

@@ -913,12 +1009,19 @@ static int intel_backlight_device_register(struct intel_connector *connector)
	if (WARN_ON(panel->backlight.device))
		return -ENODEV;

-	BUG_ON(panel->backlight.max == 0);
+	WARN_ON(panel->backlight.max == 0);

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
-	props.brightness = panel->backlight.level;
+
+	/*
+	 * Note: Everything should work even if the backlight device max
+	 * presented to the userspace is arbitrarily chosen.
+	 */
	props.max_brightness = panel->backlight.max;
+	props.brightness = scale_hw_to_user(connector,
+					    panel->backlight.level,
+					    props.max_brightness);

	/*
	 * Note: using the same name independent of the connector prevents
@@ -964,6 +1067,19 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
 * XXX: Query mode clock or hardware clock and program PWM modulation frequency
 * appropriately when it's 0. Use VBT and/or sane defaults.
 */
+static u32 get_backlight_min_vbt(struct intel_connector *connector)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_panel *panel = &connector->panel;
+
+	WARN_ON(panel->backlight.max == 0);
+
+	/* vbt value is a coefficient in range [0..255] */
+	return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
+		     0, panel->backlight.max);
+}
+
 static int bdw_setup_backlight(struct intel_connector *connector)
 {
	struct drm_device *dev = connector->base.dev;
@@ -979,6 +1095,8 @@ static int bdw_setup_backlight(struct intel_connector *connector)
	if (!panel->backlight.max)
		return -ENODEV;

+	panel->backlight.min = get_backlight_min_vbt(connector);
+
	val = bdw_get_backlight(connector);
	panel->backlight.level = intel_panel_compute_brightness(connector, val);

@@ -1003,6 +1121,8 @@ static int pch_setup_backlight(struct intel_connector *connector)
	if (!panel->backlight.max)
		return -ENODEV;

+	panel->backlight.min = get_backlight_min_vbt(connector);
+
	val = pch_get_backlight(connector);
	panel->backlight.level = intel_panel_compute_brightness(connector, val);

@@ -1035,6 +1155,8 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
	if (!panel->backlight.max)
		return -ENODEV;

+	panel->backlight.min = get_backlight_min_vbt(connector);
+
	val = i9xx_get_backlight(connector);
	panel->backlight.level = intel_panel_compute_brightness(connector, val);

@@ -1062,6 +1184,8 @@ static int i965_setup_backlight(struct intel_connector *connector)
	if (!panel->backlight.max)
		return -ENODEV;

+	panel->backlight.min = get_backlight_min_vbt(connector);
+
	val = i9xx_get_backlight(connector);
	panel->backlight.level = intel_panel_compute_brightness(connector, val);

@@ -1099,6 +1223,8 @@ static int vlv_setup_backlight(struct intel_connector *connector)
	if (!panel->backlight.max)
		return -ENODEV;

+	panel->backlight.min = get_backlight_min_vbt(connector);
+
	val = _vlv_get_backlight(dev, PIPE_A);
	panel->backlight.level = intel_panel_compute_brightness(connector, val);

@@ -2743,10 +2743,11 @@ static void ilk_update_wm(struct drm_crtc *crtc)
	ilk_write_wm_values(dev_priv, &results);
 }

-static void ilk_update_sprite_wm(struct drm_plane *plane,
-				 struct drm_crtc *crtc,
-				 uint32_t sprite_width, int pixel_size,
-				 bool enabled, bool scaled)
+static void
+ilk_update_sprite_wm(struct drm_plane *plane,
+		     struct drm_crtc *crtc,
+		     uint32_t sprite_width, uint32_t sprite_height,
+		     int pixel_size, bool enabled, bool scaled)
 {
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2754,6 +2755,7 @@ static void ilk_update_sprite_wm(struct drm_plane *plane,
	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
+	intel_plane->wm.vert_pixels = sprite_width;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	/*
@@ -2888,13 +2890,16 @@ void intel_update_watermarks(struct drm_crtc *crtc)

 void intel_update_sprite_watermarks(struct drm_plane *plane,
				    struct drm_crtc *crtc,
-				    uint32_t sprite_width, int pixel_size,
+				    uint32_t sprite_width,
+				    uint32_t sprite_height,
+				    int pixel_size,
				    bool enabled, bool scaled)
 {
	struct drm_i915_private *dev_priv = plane->dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
-		dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+		dev_priv->display.update_sprite_wm(plane, crtc,
+						   sprite_width, sprite_height,
						   pixel_size, enabled, scaled);
 }

@@ -3289,7 +3294,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
-		if (IS_VALLEYVIEW(dev))
+		if (IS_CHERRYVIEW(dev))
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+		else if (IS_VALLEYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -3392,6 +3399,8 @@ static void cherryview_disable_rps(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	gen8_disable_rps_interrupts(dev);
 }

 static void valleyview_disable_rps(struct drm_device *dev)
@@ -3465,7 +3474,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev)

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
-	bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -3476,7 +3485,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
-	snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -3781,7 +3790,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
	mutex_unlock(&dev_priv->rps.hw_lock);
 }

-int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
	u32 val, rp0;

@@ -3801,7 +3810,17 @@ static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
	return rpe;
 }

-int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp1;
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+	return rp1;
+}
+
+static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
 {
	u32 val, rpn;

@@ -3810,7 +3829,18 @@ int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
	return rpn;
 }

-int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp1;
+
+	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+	return rp1;
+}
+
+static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
	u32 val, rp0;

@@ -3835,7 +3865,7 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
	return rpe;
 }

-int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
 {
	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
 }
@@ -3952,6 +3982,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

+	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+			 dev_priv->rps.rp1_freq);
+
	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -3986,6 +4021,11 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

+	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+			 dev_priv->rps.rp1_freq);
+
	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -4093,6 +4133,8 @@ static void cherryview_enable_rps(struct drm_device *dev)

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

+	gen8_enable_rps_interrupts(dev);
+
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }

@@ -4934,11 +4976,14 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
-	WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled);
+	WARN_ON(intel_irqs_enabled(dev_priv));

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	cancel_work_sync(&dev_priv->rps.work);
+
+	/* Force GPU to min freq during suspend */
+	gen6_rps_idle(dev_priv);
 }

 void intel_disable_gt_powersave(struct drm_device *dev)
@@ -4946,7 +4991,7 @@ void intel_disable_gt_powersave(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
-	WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled);
+	WARN_ON(intel_irqs_enabled(dev_priv));

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
@@ -5684,6 +5729,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 static void cherryview_init_clock_gating(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	switch ((val >> 2) & 0x7) {
+	case 0:
+	case 1:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
+		dev_priv->mem_freq = 1600;
+		break;
+	case 2:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
+		dev_priv->mem_freq = 1600;
+		break;
+	case 3:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
+		dev_priv->mem_freq = 2000;
+		break;
+	case 4:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
+		dev_priv->mem_freq = 1600;
+		break;
+	case 5:
+		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
+		dev_priv->mem_freq = 1600;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

@@ -5924,7 +5998,6 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
	struct drm_device *dev = dev_priv->dev;
-	unsigned long irqflags;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -5940,21 +6013,8 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

-	if (IS_BROADWELL(dev)) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
-			   dev_priv->de_irq_mask[PIPE_B]);
-		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
-			   ~dev_priv->de_irq_mask[PIPE_B] |
-			   GEN8_PIPE_VBLANK);
-		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
-			   dev_priv->de_irq_mask[PIPE_C]);
-		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
-			   ~dev_priv->de_irq_mask[PIPE_C] |
-			   GEN8_PIPE_VBLANK);
-		POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	if (IS_BROADWELL(dev))
+		gen8_irq_power_well_post_enable(dev_priv);
 }

 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -6881,7 +6941,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
	return 0;
 }

-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
	int div;

@@ -6903,7 +6963,7 @@ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
 }

-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
	int mul;

@@ -6925,6 +6985,80 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
 }

+static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+	int div, freq;
+
+	switch (dev_priv->rps.cz_freq) {
+	case 200:
+		div = 5;
+		break;
+	case 267:
+		div = 6;
+		break;
+	case 320:
+	case 333:
+	case 400:
+		div = 8;
+		break;
+	default:
+		return -1;
+	}
+
+	freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+
+	return freq;
+}
+
+static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+	int mul, opcode;
+
+	switch (dev_priv->rps.cz_freq) {
+	case 200:
+		mul = 5;
+		break;
+	case 267:
+		mul = 6;
+		break;
+	case 320:
+	case 333:
+	case 400:
+		mul = 8;
+		break;
+	default:
+		return -1;
+	}
+
+	opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
+
+	return opcode;
+}
+
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+	int ret = -1;
+
+	if (IS_CHERRYVIEW(dev_priv->dev))
+		ret = chv_gpu_freq(dev_priv, val);
+	else if (IS_VALLEYVIEW(dev_priv->dev))
+		ret = byt_gpu_freq(dev_priv, val);
+
+	return ret;
+}
+
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+	int ret = -1;
+
+	if (IS_CHERRYVIEW(dev_priv->dev))
+		ret = chv_freq_opcode(dev_priv, val);
+	else if (IS_VALLEYVIEW(dev_priv->dev))
+		ret = byt_freq_opcode(dev_priv, val);
+
+	return ret;
+}
+
 void intel_pm_setup(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6935,5 +7069,5 @@ void intel_pm_setup(struct drm_device *dev)
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
-	dev_priv->pm.irqs_disabled = false;
+	dev_priv->pm._irqs_disabled = false;
 }

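The new chv_gpu_freq()/chv_freq_opcode() pair above converts between PUNIT opcode values and MHz using the CZ clock and a per-clock divider. The arithmetic is easy to check standalone; the sketch below reimplements it in plain C, with an illustrative CZ clock and opcode values (not measured from any Cherryview part).

#include <stdio.h>

/* Rounding division as the kernel's DIV_ROUND_CLOSEST does for positive
 * operands. */
static int div_round_closest(int a, int b)
{
        return (a + b / 2) / b;
}

/* Divider table copied from the chv_gpu_freq() hunk above. */
static int chv_div(int cz_freq)
{
        switch (cz_freq) {
        case 200: return 5;
        case 267: return 6;
        case 320:
        case 333:
        case 400: return 8;
        default:  return -1;
        }
}

int main(void)
{
        int cz_freq = 320;              /* illustrative CZ clock in MHz */
        int div = chv_div(cz_freq);

        for (int opcode = 20; opcode <= 80; opcode += 20) {
                int mhz = div_round_closest(cz_freq * opcode, 2 * div) / 2;
                int back = div_round_closest(mhz * 2 * div, cz_freq) * 2;
                printf("opcode %2d -> %4d MHz -> opcode %2d\n",
                       opcode, mhz, back);
        }
        return 0;
}

With cz_freq = 320 and div = 8 this gives 10 MHz per opcode step (opcode 40 comes out as 400 MHz and converts back to 40), which is the round trip the driver relies on.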
@@ -1004,7 +1004,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0)
-		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
@@ -1019,7 +1019,7 @@ gen5_ring_put_irq(struct intel_engine_cs *ring)

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0)
-		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }

@@ -1212,7 +1212,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
					 GT_PARITY_ERROR(dev)));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

@@ -1232,7 +1232,7 @@ gen6_ring_put_irq(struct intel_engine_cs *ring)
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
		else
			I915_WRITE_IMR(ring, ~0);
-		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
@@ -1250,7 +1250,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

@@ -1270,7 +1270,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
-		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }

@@ -218,7 +218,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,

	sprctl |= SP_ENABLE;

-	intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
+	intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
+				       pixel_size, true,
				       src_w != crtc_w || src_h != crtc_h);

	/* Sizes are 0 based */
@@ -283,7 +284,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
	if (atomic_update)
		intel_pipe_update_end(intel_crtc, start_vbl_count);

-	intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
+	intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
 }

 static int
@@ -406,7 +407,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		sprctl |= SPRITE_PIPE_CSC_ENABLE;

-	intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+	intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
+				       true,
				       src_w != crtc_w || src_h != crtc_h);

	/* Sizes are 0 based */
@@ -486,7 +488,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
	 */
	intel_wait_for_vblank(dev, pipe);

-	intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+	intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
 }

 static int
@@ -606,7 +608,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
		dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
	dvscntr |= DVS_ENABLE;

-	intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+	intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
+				       pixel_size, true,
				       src_w != crtc_w || src_h != crtc_h);

	/* Sizes are 0 based */
@@ -681,7 +684,7 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
	 */
	intel_wait_for_vblank(dev, pipe);

-	intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+	intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
 }

 static void

|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
|
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
|
||||||
|
bool before)
|
||||||
{
|
{
|
||||||
|
const char *op = read ? "reading" : "writing to";
|
||||||
|
const char *when = before ? "before" : "after";
|
||||||
|
|
||||||
|
if (!i915.mmio_debug)
|
||||||
|
return;
|
||||||
|
|
||||||
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
|
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
|
||||||
DRM_ERROR("Unknown unclaimed register before writing to %x\n",
|
WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
|
||||||
reg);
|
when, op, reg);
|
||||||
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
|
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
|
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
|
||||||
{
|
{
|
||||||
|
if (i915.mmio_debug)
|
||||||
|
return;
|
||||||
|
|
||||||
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
|
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
|
||||||
DRM_ERROR("Unclaimed write to %x\n", reg);
|
DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
|
||||||
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
|
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -564,6 +574,7 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
|
|||||||
static u##x \
|
static u##x \
|
||||||
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
|
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
|
||||||
REG_READ_HEADER(x); \
|
REG_READ_HEADER(x); \
|
||||||
|
hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
|
||||||
if (dev_priv->uncore.forcewake_count == 0 && \
|
if (dev_priv->uncore.forcewake_count == 0 && \
|
||||||
NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
|
NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
|
||||||
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
|
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
|
||||||
@ -574,6 +585,7 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
|
|||||||
} else { \
|
} else { \
|
||||||
val = __raw_i915_read##x(dev_priv, reg); \
|
val = __raw_i915_read##x(dev_priv, reg); \
|
||||||
} \
|
} \
|
||||||
|
hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
|
||||||
REG_READ_FOOTER; \
|
REG_READ_FOOTER; \
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -700,12 +712,13 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
|
|||||||
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
|
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
|
||||||
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
|
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
|
||||||
} \
|
} \
|
||||||
hsw_unclaimed_reg_clear(dev_priv, reg); \
|
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
|
||||||
__raw_i915_write##x(dev_priv, reg, val); \
|
__raw_i915_write##x(dev_priv, reg, val); \
|
||||||
if (unlikely(__fifo_ret)) { \
|
if (unlikely(__fifo_ret)) { \
|
||||||
gen6_gt_check_fifodbg(dev_priv); \
|
gen6_gt_check_fifodbg(dev_priv); \
|
||||||
} \
|
} \
|
||||||
hsw_unclaimed_reg_check(dev_priv, reg); \
|
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
|
||||||
|
hsw_unclaimed_reg_detect(dev_priv); \
|
||||||
REG_WRITE_FOOTER; \
|
REG_WRITE_FOOTER; \
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -734,6 +747,7 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
|
|||||||
static void \
|
static void \
|
||||||
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
|
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
|
||||||
REG_WRITE_HEADER; \
|
REG_WRITE_HEADER; \
|
||||||
|
hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
|
||||||
if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
|
if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
|
||||||
if (dev_priv->uncore.forcewake_count == 0) \
|
if (dev_priv->uncore.forcewake_count == 0) \
|
||||||
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
|
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
|
||||||
@ -745,6 +759,8 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
|
|||||||
} else { \
|
} else { \
|
||||||
__raw_i915_write##x(dev_priv, reg, val); \
|
__raw_i915_write##x(dev_priv, reg, val); \
|
||||||
} \
|
} \
|
||||||
|
hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
|
||||||
|
hsw_unclaimed_reg_detect(dev_priv); \
|
||||||
REG_WRITE_FOOTER; \
|
REG_WRITE_FOOTER; \
|
||||||
}
|
}
|
||||||
|
|
||||||
|
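The register-access changes above follow one pattern: when the i915.mmio_debug module option is set, every MMIO access is bracketed by an unclaimed-register check before and after it; otherwise a cheaper detect pass after writes just points the user at the option. The sketch below shows the shape of that bracketing with stand-in flags and print statements only; none of it is real i915 or DRM API.

#include <stdbool.h>
#include <stdio.h>

static bool mmio_debug = true;   /* stand-in for the i915.mmio_debug option */
static bool unclaimed_flag;      /* stand-in for the FPGA_DBG "no claim" bit */

static void check_unclaimed(const char *when, const char *op, unsigned reg)
{
        if (!mmio_debug)
                return;
        if (unclaimed_flag) {
                printf("unclaimed access detected %s %s 0x%x\n", when, op, reg);
                unclaimed_flag = false;  /* clear the sticky flag, as the driver does */
        }
}

static void write_reg(unsigned reg, unsigned val)
{
        check_unclaimed("before", "writing to", reg);
        /* ...the actual register write would happen here... */
        (void)val;
        check_unclaimed("after", "writing to", reg);
}

int main(void)
{
        unclaimed_flag = true;           /* pretend an earlier access misfired */
        write_reg(0x1234, 0xdead);
        return 0;
}

Checking both before and after lets the driver attribute a stray access to the register being touched rather than only noticing it at some later, unrelated write.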
@@ -119,13 +119,6 @@ struct omap_drm_private {
	struct omap_drm_irq error_handler;
 };

-/* this should probably be in drm-core to standardize amongst drivers */
-#define DRM_ROTATE_0	0
-#define DRM_ROTATE_90	1
-#define DRM_ROTATE_180	2
-#define DRM_ROTATE_270	3
-#define DRM_REFLECT_X	4
-#define DRM_REFLECT_Y	5

 #ifdef CONFIG_DEBUG_FS
 int omap_debugfs_init(struct drm_minor *minor);

@@ -308,16 +308,13 @@ void omap_plane_install_properties(struct drm_plane *plane,
	if (priv->has_dmm) {
		prop = priv->rotation_prop;
		if (!prop) {
-			const struct drm_prop_enum_list props[] = {
-					{ DRM_ROTATE_0,   "rotate-0" },
-					{ DRM_ROTATE_90,  "rotate-90" },
-					{ DRM_ROTATE_180, "rotate-180" },
-					{ DRM_ROTATE_270, "rotate-270" },
-					{ DRM_REFLECT_X,  "reflect-x" },
-					{ DRM_REFLECT_Y,  "reflect-y" },
-			};
-			prop = drm_property_create_bitmask(dev, 0, "rotation",
-					props, ARRAY_SIZE(props));
+			prop = drm_mode_create_rotation_property(dev,
+								 BIT(DRM_ROTATE_0) |
+								 BIT(DRM_ROTATE_90) |
+								 BIT(DRM_ROTATE_180) |
+								 BIT(DRM_ROTATE_270) |
+								 BIT(DRM_REFLECT_X) |
+								 BIT(DRM_REFLECT_Y));
			if (prop == NULL)
				return;
			priv->rotation_prop = prop;

@@ -76,6 +76,14 @@ static inline uint64_t I642U64(int64_t val)
	return (uint64_t)*((uint64_t *)&val);
 }

+/* rotation property bits */
+#define DRM_ROTATE_0	0
+#define DRM_ROTATE_90	1
+#define DRM_ROTATE_180	2
+#define DRM_ROTATE_270	3
+#define DRM_REFLECT_X	4
+#define DRM_REFLECT_Y	5
+
 enum drm_connector_force {
	DRM_FORCE_UNSPECIFIED,
	DRM_FORCE_OFF,
@@ -835,6 +843,7 @@ struct drm_mode_config {

	/* Optional properties */
	struct drm_property *scaling_mode_property;
+	struct drm_property *aspect_ratio_property;
	struct drm_property *dirty_info_property;

	/* dumb ioctl parameters */
@@ -1011,7 +1020,8 @@ extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int
 struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
					 int flags, const char *name,
					 const struct drm_prop_enum_list *props,
-					 int num_values);
+					 int num_props,
+					 uint64_t supported_bits);
 struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
					 const char *name,
					 uint64_t min, uint64_t max);
@@ -1027,6 +1037,7 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
 extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
					 char *formats[]);
 extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
 extern int drm_mode_create_dirty_info_property(struct drm_device *dev);

 extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
@@ -1117,6 +1128,10 @@ extern int drm_format_plane_cpp(uint32_t format, int plane);
 extern int drm_format_horz_chroma_subsampling(uint32_t format);
 extern int drm_format_vert_chroma_subsampling(uint32_t format);
 extern const char *drm_get_format_name(uint32_t format);
+extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+							       unsigned int supported_rotations);
+extern unsigned int drm_rotation_simplify(unsigned int rotation,
+					  unsigned int supported_rotations);

 /* Helpers */

|
|||||||
struct drm_rect *dst,
|
struct drm_rect *dst,
|
||||||
int min_vscale, int max_vscale);
|
int min_vscale, int max_vscale);
|
||||||
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
|
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
|
||||||
|
void drm_rect_rotate(struct drm_rect *r,
|
||||||
|
int width, int height,
|
||||||
|
unsigned int rotation);
|
||||||
|
void drm_rect_rotate_inv(struct drm_rect *r,
|
||||||
|
int width, int height,
|
||||||
|
unsigned int rotation);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@@ -88,6 +88,11 @@
 #define DRM_MODE_SCALE_CENTER		2 /* Centered, no scaling */
 #define DRM_MODE_SCALE_ASPECT		3 /* Full screen, preserve aspect */

+/* Picture aspect ratio options */
+#define DRM_MODE_PICTURE_ASPECT_NONE	0
+#define DRM_MODE_PICTURE_ASPECT_4_3	1
+#define DRM_MODE_PICTURE_ASPECT_16_9	2
+
 /* Dithering mode options */
 #define DRM_MODE_DITHERING_OFF	0
 #define DRM_MODE_DITHERING_ON	1