mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-29 22:26:44 +07:00
Merge tag 'drm-intel-next-2013-03-23' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:

Highlights:
- Imre's for_each_sg_pages rework (now also with the stolen mem backed case fixed with a hack) plus the drm prime sg list coalescing patch from Rahul Sharma. I have some follow-up cleanups pending, already acked by Andrew Morton.
- Some prep-work for the crazy no-pch/display-less platform by Ben.
- Some vlv patches, by far not all (Jesse et al).
- Clean up the HDMI/SDVO #define confusion (Paulo)
- gen2-4 vblank fixes from Ville.
- Unclaimed register warning fixes for hsw (Paulo). More still to come ...
- Complete pageflips which have been stuck in a gpu hang, should prevent stuck gl compositors (Ville).
- pm patches for vt-switchless resume (Jesse). Note that the i915 enabling is not (yet) included, that took a bit longer to settle. PM patches are acked by Rafael Wysocki.
- Minor fixlets all over from various people.

* tag 'drm-intel-next-2013-03-23' of git://people.freedesktop.org/~danvet/drm-intel: (79 commits)
  drm/i915: Implement WaSwitchSolVfFArbitrationPriority
  drm/i915: Set the VIC in AVI infoframe for SDVO
  drm/i915: Kill a strange comment about DPMS functions
  drm/i915: Correct sandybrige overclocking
  drm/i915: Introduce GEN7_FEATURES for device info
  drm/i915: Move num_pipes to intel info
  drm/i915: fixup pd vs pt confusion in gen6 ppgtt code
  style nit: Align function parameter continuation properly.
  drm/i915: VLV doesn't have HDMI on port C
  drm/i915: DSPFW and BLC regs are in the display offset range
  drm/i915: set conservative clock gating values on VLV v2
  drm/i915: fix WaDisablePSDDualDispatchEnable on VLV v2
  drm/i915: add more VLV IDs
  drm/i915: use VLV DIP routines on VLV v2
  drm/i915: add media well to VLV force wake routines v2
  drm/i915: don't use plane pipe select on VLV
  drm: modify pages_to_sg prime helper to create optimized SG table
  drm/i915: use for_each_sg_page for setting up the gtt ptes
  drm/i915: create compact dma scatter lists for gem objects
  drm/i915: handle walking compact dma scatter lists
  ...
This commit is contained in:
commit 399403c7ce
@@ -105,12 +105,11 @@ drm_clflush_sg(struct sg_table *st)
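The hunk above (drm_clflush_sg) switches the flush loop from walking scatterlist entries with for_each_sg()/sg_page() to walking pages with for_each_sg_page(), so entries that have been coalesced into multi-page runs are still flushed page by page. A minimal sketch of that iteration pattern, with the per-page flush abstracted into a callback (drm_clflush_page() plays that role in the original):

```c
#include <linux/scatterlist.h>

/*
 * Walk an sg_table one page at a time.  for_each_sg_page() visits every
 * backing page even when a single scatterlist entry covers a multi-page
 * contiguous run, which a plain for_each_sg()/sg_page() loop would miss.
 */
static void flush_sg_pages(struct sg_table *st,
			   void (*flush_page)(struct page *page))
{
	struct sg_page_iter sg_iter;

	/* The 3.9-era iterator exposes the page directly; newer kernels
	 * read it with sg_page_iter_page(&sg_iter) instead. */
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		flush_page(sg_iter.page);
}
```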
@@ -401,21 +401,17 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
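The hunk above (drm_prime_pages_to_sg) stops allocating one scatterlist entry per page and instead calls sg_alloc_table_from_pages(), which merges physically contiguous pages into a single entry. A sketch of the coalescing call, assuming the caller owns the page array; error handling is reduced to NULL returns:

```c
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Build a (possibly coalesced) sg_table describing an array of pages. */
static struct sg_table *pages_to_sg_coalesced(struct page **pages, int nr_pages)
{
	struct sg_table *sg;

	sg = kmalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	/* Contiguous pages are folded into one entry, so sg->nents can end
	 * up much smaller than nr_pages. */
	if (sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				      (unsigned long)nr_pages << PAGE_SHIFT,
				      GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	return sg;
}
```

Consumers that previously assumed nents equals the number of pages have to walk the result with for_each_sg_page(), which is what most of the i915 hunks below do.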
@@ -772,6 +772,23 @@ static int i915_error_state(struct seq_file *m, void *unused)
@@ -849,76 +866,42 @@ static const struct file_operations i915_error_state_fops = {
@@ -1680,105 +1663,51 @@ static int i915_dpio_info(struct seq_file *m, void *data)
@@ -1787,16 +1716,12 @@ i915_ring_stop_write(struct file *filp,
@@ -1806,46 +1731,23 @@ static const struct file_operations i915_ring_stop_fops = {
@@ -1883,27 +1785,19 @@ i915_drop_caches_write(struct file *filp,
@@ -1912,42 +1806,23 @@ i915_max_freq_read(struct file *filp,
@@ -1956,30 +1831,24 @@ i915_max_freq_write(struct file *filp,
@@ -1988,40 +1857,23 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
@@ -2030,33 +1882,25 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
@@ -2068,46 +1912,25 @@ i915_cache_sharing_read(struct file *filp,
@@ -2115,16 +1938,12 @@ i915_cache_sharing_write(struct file *filp,
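The debugfs hunks above drop the hand-rolled read/write file_operations (stack snprintf buffers, copy_from_user, simple_strtoul) for i915_next_seqno, i915_wedged, i915_ring_stop, i915_drop_caches, i915_max_freq, i915_min_freq and i915_cache_sharing, and express each file as a DEFINE_SIMPLE_ATTRIBUTE get/set pair operating on a u64. A minimal, self-contained sketch of that shape for a hypothetical counter; the name, lock and format string are placeholders:

```c
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static u64 example_counter;

static int example_counter_get(void *data, u64 *val)
{
	mutex_lock(&example_lock);
	*val = example_counter;
	mutex_unlock(&example_lock);
	return 0;			/* 0 on success, -errno otherwise */
}

static int example_counter_set(void *data, u64 val)
{
	mutex_lock(&example_lock);
	example_counter = val;
	mutex_unlock(&example_lock);
	return 0;
}

/* The format string is what a read of the debugfs file prints. */
DEFINE_SIMPLE_ATTRIBUTE(example_counter_fops, example_counter_get,
			example_counter_set, "0x%08llx\n");

/* Registration from an init path would look like:
 *	debugfs_create_file("example_counter", 0644, parent_dentry,
 *			    private_data, &example_counter_fops);
 */
```

Because set() now receives a u64, divisions such as the max/min frequency conversion use do_div(val, GT_FREQUENCY_MULTIPLIER) rather than the plain / operator, which would pull in libgcc's 64-bit division helpers on 32-bit kernels.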
@@ -1452,6 +1452,22 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
@@ -1542,6 +1558,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
@@ -1612,14 +1630,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
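The first hunk above adds intel_early_sanitize_regs(), called before the driver's first I915_READ/I915_WRITE so that any unclaimed-register indication left by the BIOS is cleared up front. A self-contained sketch of that one-time clear, with the MMIO write abstracted into a callback; in the driver the register and bit are FPGA_DBG and its write-one-to-clear FPGA_DBG_RM_NOCLAIM flag:

```c
#include <linux/types.h>

/*
 * Clear firmware-inherited "unclaimed register access" state once,
 * before the first tracked register write, so the per-write unclaimed
 * checks introduced later in this series only report accesses made by
 * the driver itself.  write32 stands in for an untraced MMIO helper.
 */
static void early_sanitize_unclaimed(void (*write32)(u32 reg, u32 val),
				     bool has_unclaimed_check,
				     u32 dbg_reg, u32 noclaim_bit)
{
	if (has_unclaimed_check)
		write32(dbg_reg, noclaim_bit);	/* write-one-to-clear */
}
```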
@@ -121,9 +121,7 @@ MODULE_PARM_DESC(i915_enable_ppgtt,
@@ -143,74 +141,74 @@ extern int intel_agp_enabled;
@@ -218,26 +216,26 @@ static const struct intel_device_info intel_gm45_info = {
@@ -246,7 +244,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
@@ -255,61 +253,49 @@ static const struct intel_device_info intel_sandybridge_m_info = {
@@ -394,6 +380,9 @@ static const struct pci_device_id pciidlist[] = { /* aka */
@@ -1147,6 +1136,27 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
@@ -1183,18 +1193,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
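The device-info hunks above introduce GEN7_FEATURES so that the flags shared by every gen7 platform are spelled out once and each intel_device_info only lists its deltas. The mechanism is plain C: a macro expanding to a run of designated initializers, where a later initializer for the same member overrides an earlier one. A reduced sketch with an illustrative struct:

```c
/* Common gen7 flags, expanded inside each struct initializer. */
#define GEN7_FEATURES \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_bsd_ring = 1, .has_blt_ring = 1, \
	.has_llc = 1, .has_force_wake = 1

struct example_device_info {		/* reduced stand-in for intel_device_info */
	unsigned int gen;
	unsigned int num_pipes;
	unsigned int need_gfx_hws:1, has_hotplug:1;
	unsigned int has_bsd_ring:1, has_blt_ring:1;
	unsigned int has_llc:1, has_force_wake:1;
	unsigned int is_mobile:1, is_valleyview:1;
};

/*
 * A platform pulls in the common block and overrides what differs;
 * with C99 designated initializers the later .num_pipes = 2 wins over
 * the 3 supplied by GEN7_FEATURES (as the Valleyview entries do).
 */
static const struct example_device_info example_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.is_valleyview = 1,
	.num_pipes = 2,
};
```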
@@ -93,7 +93,7 @@ enum port {
@@ -243,7 +243,7 @@ struct drm_i915_error_state {
@@ -341,6 +341,7 @@ struct drm_i915_gt_funcs {
@@ -905,7 +906,6 @@ typedef struct drm_i915_private {
@@ -913,7 +913,6 @@ typedef struct drm_i915_private {
@@ -1340,6 +1339,7 @@ struct drm_i915_file_private {
@@ -1529,17 +1529,12 @@ void i915_gem_lastclose(struct drm_device *dev);
@@ -1901,4 +1896,9 @@ static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
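Alongside the num_pipes move into intel_device_info, the header hunks above add a to_user_ptr() helper so the many `(void __user *)(uintptr_t)args->ptr` casts in the GEM ioctls collapse into one readable call. The helper plus a typical call site, sketched with a hypothetical ioctl argument struct:

```c
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* ioctl argument structs carry user pointers as u64 so their layout is
 * identical for 32- and 64-bit userspace; this turns them back into a
 * __user pointer. */
static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}

struct example_ioctl_args {		/* hypothetical stand-in */
	__u64 data_ptr;
	__u64 size;
};

static int example_copy_in(void *dst, const struct example_ioctl_args *args)
{
	if (copy_from_user(dst, to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;
	return 0;
}
```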
@@ -411,10 +411,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
@@ -441,11 +440,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
@@ -460,7 +457,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
@@ -522,7 +518,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
@@ -613,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
@@ -732,10 +728,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
@@ -768,13 +763,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
@@ -796,7 +789,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
@@ -867,11 +859,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
@@ -1633,9 +1625,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
@@ -1655,8 +1646,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
@@ -1757,7 +1748,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
@@ -1787,7 +1780,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
@@ -1810,9 +1805,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
@@ -1821,8 +1825,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
@@ -4010,7 +4015,16 @@ int i915_gem_init(struct drm_device *dev)
@@ -4327,7 +4341,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
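The i915_gem_object_get_pages_gtt() hunks above change how the backing sg_table is built: instead of one entry per shmem page, the fill loop extends the current entry whenever the next page is physically adjacent (page_to_pfn(page) == last_pfn + 1), only starting a new entry otherwise, and finishes with sg_mark_end(). A compact sketch of that merge loop, assuming the table was allocated with room for nr_pages entries:

```c
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Fill an sg_table from an array of pages, merging physically
 * contiguous neighbours into a single scatterlist entry.  st must have
 * been allocated with at least nr_pages entries; st->nents ends up
 * holding the number of entries actually used.
 */
static void fill_sg_coalesced(struct sg_table *st, struct page **pages,
			      int nr_pages)
{
	struct scatterlist *sg = st->sgl;
	unsigned long last_pfn = 0;	/* only meaningful after the first page */
	int i;

	st->nents = 0;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = pages[i];

		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;	/* extend the current run */
		}
		last_pfn = page_to_pfn(page);
	}
	sg_mark_end(sg);
}
```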
@@ -62,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
@@ -105,7 +105,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
@@ -124,14 +124,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
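With coalesced tables, obj->pages->nents no longer equals the number of backing pages, so the dma-buf vmap hunk above sizes its temporary page array from the object size and counts pages while walking the table. A sketch of that sizing, reduced to the sg_table and the byte size:

```c
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Collect every backing page of a (possibly coalesced) sg_table and
 * vmap it.  The array is sized from the object size, not from nents,
 * because one scatterlist entry may now cover many pages.
 */
static void *vmap_sg_table(struct sg_table *st, size_t obj_size)
{
	unsigned int nr_pages = obj_size >> PAGE_SHIFT;
	struct sg_page_iter sg_iter;
	struct page **pages;
	void *addr;
	int i = 0;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		pages[i++] = sg_iter.page;	/* 3.9-era field, as noted earlier */

	addr = vmap(pages, i, 0, PAGE_KERNEL);
	kfree(pages);
	return addr;
}
```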
@@ -305,7 +305,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
@@ -359,8 +359,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
@@ -475,7 +474,6 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
@@ -618,7 +616,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
@@ -663,7 +661,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
@@ -736,7 +734,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
@@ -752,7 +750,11 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
@@ -949,9 +951,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
@@ -986,13 +987,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
@@ -1115,7 +1116,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
@@ -1154,7 +1155,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
@@ -1195,8 +1196,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -1208,7 +1208,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
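Most of the execbuffer hunks above are mechanical to_user_ptr() conversions and dropped unused parameters, but the validate_exec_list() hunk also documents why the relocation array is checked with write access: presumed offsets may be written back after execution. A sketch of that validation, using the three-argument access_ok() of this kernel generation and a stand-in relocation struct (the real drm_i915_gem_relocation_entry has a different layout):

```c
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_reloc {			/* stand-in for the per-entry struct */
	__u64 target_handle;
	__u64 delta;
	__u64 offset;
	__u64 presumed_offset;
};

/*
 * Validate a user-supplied relocation array before touching it: reject
 * counts whose byte length would overflow, then require the whole range
 * to be writable, because entries may be updated after execution.
 */
static int validate_user_relocs(void __user *ptr, unsigned int count)
{
	const unsigned int relocs_max = INT_MAX / sizeof(struct example_reloc);
	size_t length;

	if (count > relocs_max)
		return -EINVAL;

	length = count * sizeof(struct example_reloc);
	if (!access_ok(VERIFY_WRITE, ptr, length))
		return -EFAULT;

	return 0;
}
```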
@@ -83,7 +83,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
@@ -96,7 +96,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
@@ -105,7 +105,7 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
@@ -115,42 +115,27 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
@@ -432,21 +417,17 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
@@ -752,7 +733,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
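The GTT hunks above rewrite the PTE fill loops (gen6_ppgtt_insert_entries, gen6_ggtt_insert_entries) around for_each_sg_page: each step takes sg_dma_address() of the entry the iterator currently sits in and adds the page offset within that entry, instead of hand-walking sg entries with a separate per-entry page counter. A sketch of that DMA-address iteration, with the PTE encode/write reduced to a callback:

```c
#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Visit the DMA address of every page described by an sg_table.  With
 * coalesced entries a page's address is the entry's base DMA address
 * plus the page's offset inside that entry (sg_pgoffset), which is
 * exactly how the GGTT/PPGTT insert loops above compute it.
 */
static void for_each_dma_page(struct sg_table *st,
			      void (*emit_pte)(dma_addr_t addr, void *ctx),
			      void *ctx)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		dma_addr_t addr = sg_dma_address(sg_iter.sg) +
			((dma_addr_t)sg_iter.sg_pgoffset << PAGE_SHIFT);

		emit_pte(addr, ctx);
	}
}
```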
@@ -222,8 +222,8 @@ i915_pages_create_for_stolen(struct drm_device *dev,
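Stolen memory has no struct pages, so the hunk above points its single scatterlist entry at the GTT scratch page purely so that for_each_sg_page() has a page to hand back, while the DMA address and length still describe the real stolen range; this is the "hack" mentioned in the merge highlights. The shape of that setup:

```c
#include <linux/scatterlist.h>

/*
 * Describe a physically contiguous, page-less range with a one-entry
 * scatterlist.  The backing page is a dummy (a scratch page) only so
 * that page-based iterators do not see a NULL page; consumers that
 * matter use the DMA address/length, which cover the real range.
 */
static void init_stolen_sg(struct scatterlist *sg, struct page *dummy_page,
			   dma_addr_t base, unsigned int offset,
			   unsigned int size)
{
	sg_set_page(sg, dummy_page, size, offset);
	sg_dma_address(sg) = base + offset;
	sg_dma_len(sg) = size;
}
```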
@ -473,28 +473,29 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
struct sg_page_iter sg_iter;
int i;

if (obj->bit_17 == NULL)
return;

for_each_sg(obj->pages->sgl, sg, page_count, i) {
struct page *page = sg_page(sg);
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
struct page *page = sg_iter.page;
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
i++;
}
}

void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct scatterlist *sg;
struct sg_page_iter sg_iter;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;

@ -508,11 +509,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
}
}

for_each_sg(obj->pages->sgl, sg, page_count, i) {
struct page *page = sg_page(sg);
if (page_to_phys(page) & (1 << 17))
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
if (page_to_phys(sg_iter.page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
i++;
}
}

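
The swizzle bookkeeping above reduces to one bit per page: whether bit 17 of the page's physical address was set when the bitmap was last saved. A compact sketch of that idea with a caller-supplied bitmap (illustrative helpers, not the driver's own):

    #include <linux/bitops.h>
    #include <linux/io.h>
    #include <linux/mm.h>

    /* Remember bit 17 of each page's physical address. */
    static void save_bit17(struct page **pages, int count, unsigned long *bitmap)
    {
            int i;

            for (i = 0; i < count; i++) {
                    if (page_to_phys(pages[i]) & (1 << 17))
                            __set_bit(i, bitmap);
                    else
                            __clear_bit(i, bitmap);
            }
    }

    /* A page needs re-swizzling when the recorded bit no longer matches. */
    static bool needs_reswizzle(struct page *page, int i, const unsigned long *bitmap)
    {
            bool now = page_to_phys(page) & (1 << 17);

            return now != !!test_bit(i, bitmap);
    }
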
@ -60,26 +60,30 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != mask) {
u32 reg = PIPESTAT(pipe);
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;

dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
POSTING_READ(reg);
}
if ((pipestat & mask) == mask)
return;

/* Enable the interrupt, clear any pending status */
pipestat |= mask | (mask >> 16);
I915_WRITE(reg, pipestat);
POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != 0) {
u32 reg = PIPESTAT(pipe);
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;

dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
POSTING_READ(reg);
}
if ((pipestat & mask) == 0)
return;

pipestat &= ~mask;
I915_WRITE(reg, pipestat);
POSTING_READ(reg);
}

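
For readers unfamiliar with PIPESTAT: enable bits live in the high half of the register and each status bit sits 16 positions below its enable bit, with status bits being write-one-to-clear. That, roughly, is why the code above keeps only the high half of the current value (& 0x7fff0000) and ORs in (mask >> 16) when enabling, acking any stale status for the same event. A small sketch of the resulting read-modify-write, using hypothetical bit names (the layout described here is an inference from the masking above, not a datasheet quote):

    #include <linux/types.h>

    #define MY_EVENT_ENABLE (1u << 17)                 /* enable bit, high half */
    #define MY_EVENT_STATUS (MY_EVENT_ENABLE >> 16)    /* status bit, low half */

    static u32 pipestat_enable_value(u32 current_reg, u32 enable_mask)
    {
            u32 val = current_reg & 0x7fff0000;        /* keep existing enables only */

            /* Set the enable bit and ack any stale status for the same event. */
            val |= enable_mask | (enable_mask >> 16);
            return val;
    }
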
/**
@ -250,10 +254,9 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
struct timeval *vblank_time,
unsigned flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;

if (pipe < 0 || pipe >= dev_priv->num_pipe) {
if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
@ -937,6 +940,8 @@ static void i915_error_work_func(struct work_struct *work)
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);

intel_display_handle_reset(dev);

wake_up_all(&dev_priv->gpu_error.reset_queue);
}
}
@ -972,24 +977,23 @@ static void i915_get_extra_instdone(struct drm_device *dev,

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src)
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src,
const int num_pages)
{
struct drm_i915_error_object *dst;
int i, count;
int i;
u32 reloc_offset;

if (src == NULL || src->pages == NULL)
return NULL;

count = src->base.size / PAGE_SIZE;

dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;

reloc_offset = src->gtt_offset;
for (i = 0; i < count; i++) {
for (i = 0; i < num_pages; i++) {
unsigned long flags;
void *d;

@ -1039,7 +1043,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,

reloc_offset += PAGE_SIZE;
}
dst->page_count = count;
dst->page_count = num_pages;
dst->gtt_offset = src->gtt_offset;

return dst;
@ -1050,6 +1054,9 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
kfree(dst);
return NULL;
}
#define i915_error_object_create(dev_priv, src) \
i915_error_object_create_sized((dev_priv), (src), \
(src)->base.size>>PAGE_SHIFT)

static void
i915_error_object_free(struct drm_i915_error_object *obj)
@ -1256,6 +1263,26 @@ static void i915_record_ring_state(struct drm_device *dev,
error->cpu_ring_tail[ring->id] = ring->tail;
}


static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;

/* Currently render ring is the only HW context user */
if (ring->id != RCS || !error->ccid)
return;

list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
ering->ctx = i915_error_object_create_sized(dev_priv,
obj, 1);
}
}
}

static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
@ -1273,6 +1300,9 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].ringbuffer =
i915_error_object_create(dev_priv, ring->obj);


i915_gem_record_active_context(ring, error, &error->ring[i]);

count = 0;
list_for_each_entry(request, &ring->request_list, list)
count++;
@ -1328,14 +1358,15 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}

DRM_INFO("capturing error event; look for more information in"
DRM_INFO("capturing error event; look for more information in "
"/sys/kernel/debug/dri/%d/i915_error_state\n",
dev->primary->index);

kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->ccid = I915_READ(CCID);
if (HAS_HW_CONTEXTS(dev))
error->ccid = I915_READ(CCID);

if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
@ -1567,7 +1598,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}

static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@ -1777,6 +1808,37 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
return false;
}

static bool semaphore_passed(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
struct intel_ring_buffer *signaller;
u32 cmd, ipehr, acthd_min;

ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if ((ipehr & ~(0x3 << 16)) !=
(MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
return false;

/* ACTHD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX.
*/
acthd_min = max((int)acthd - 3 * 4, 0);
do {
cmd = ioread32(ring->virtual_start + acthd);
if (cmd == ipehr)
break;

acthd -= 4;
if (acthd < acthd_min)
return false;
} while (1);

signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
return i915_seqno_passed(signaller->get_seqno(signaller, false),
ioread32(ring->virtual_start+acthd+4)+1);
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
@ -1788,6 +1850,15 @@ static bool kick_ring(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring, tmp);
return true;
}

if (INTEL_INFO(dev)->gen >= 6 &&
tmp & RING_WAIT_SEMAPHORE &&
semaphore_passed(ring)) {
DRM_ERROR("Kicking stuck semaphore on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
return true;
}
return false;
}

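
semaphore_passed() finishes with i915_seqno_passed(), which has to stay correct across 32-bit seqno wraparound; the usual idiom for that is a signed difference, roughly as below (a sketch of the idiom, not necessarily the driver's exact definition):

    #include <linux/types.h>

    /* True when seq1 is at or after seq2, even across u32 wraparound. */
    static inline bool seqno_passed(u32 seq1, u32 seq2)
    {
            return (s32)(seq1 - seq2) >= 0;
    }
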
@ -2089,9 +2160,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;

/* Hack for broken MSIs on VLV */
pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
pci_read_config_word(dev->pdev, 0x98, &msid);
@ -2221,9 +2289,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;

I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

@ -2246,6 +2311,37 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}

/*
|
||||
* Returns true when a page flip has completed.
|
||||
*/
|
||||
static bool i8xx_handle_vblank(struct drm_device *dev,
|
||||
int pipe, u16 iir)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
|
||||
|
||||
if (!drm_handle_vblank(dev, pipe))
|
||||
return false;
|
||||
|
||||
if ((iir & flip_pending) == 0)
|
||||
return false;
|
||||
|
||||
intel_prepare_page_flip(dev, pipe);
|
||||
|
||||
/* We detect FlipDone by looking for the change in PendingFlip from '1'
|
||||
* to '0' on the following vblank, i.e. IIR has the Pendingflip
|
||||
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
|
||||
* the flip is completed (no longer pending). Since this doesn't raise
|
||||
* an interrupt per se, we watch for the change at vblank.
|
||||
*/
|
||||
if (I915_READ16(ISR) & flip_pending)
|
||||
return false;
|
||||
|
||||
intel_finish_page_flip(dev, pipe);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *) arg;
|
||||
@ -2301,22 +2397,12 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
||||
notify_ring(dev, &dev_priv->ring[RCS]);
|
||||
|
||||
if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
|
||||
drm_handle_vblank(dev, 0)) {
|
||||
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
|
||||
intel_prepare_page_flip(dev, 0);
|
||||
intel_finish_page_flip(dev, 0);
|
||||
flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
|
||||
}
|
||||
}
|
||||
i8xx_handle_vblank(dev, 0, iir))
|
||||
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
|
||||
|
||||
if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
|
||||
drm_handle_vblank(dev, 1)) {
|
||||
if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
|
||||
intel_prepare_page_flip(dev, 1);
|
||||
intel_finish_page_flip(dev, 1);
|
||||
flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
|
||||
}
|
||||
}
|
||||
i8xx_handle_vblank(dev, 1, iir))
|
||||
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
|
||||
|
||||
iir = new_iir;
|
||||
}
|
||||
@ -2364,9 +2450,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
|
||||
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
||||
u32 enable_mask;
|
||||
|
||||
dev_priv->pipestat[0] = 0;
|
||||
dev_priv->pipestat[1] = 0;
|
||||
|
||||
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
|
||||
|
||||
/* Unmask the interrupts that we always want on. */
|
||||
@ -2433,6 +2516,37 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true when a page flip has completed.
|
||||
*/
|
||||
static bool i915_handle_vblank(struct drm_device *dev,
|
||||
int plane, int pipe, u32 iir)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
|
||||
|
||||
if (!drm_handle_vblank(dev, pipe))
|
||||
return false;
|
||||
|
||||
if ((iir & flip_pending) == 0)
|
||||
return false;
|
||||
|
||||
intel_prepare_page_flip(dev, plane);
|
||||
|
||||
/* We detect FlipDone by looking for the change in PendingFlip from '1'
|
||||
* to '0' on the following vblank, i.e. IIR has the Pendingflip
|
||||
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
|
||||
* the flip is completed (no longer pending). Since this doesn't raise
|
||||
* an interrupt per se, we watch for the change at vblank.
|
||||
*/
|
||||
if (I915_READ(ISR) & flip_pending)
|
||||
return false;
|
||||
|
||||
intel_finish_page_flip(dev, pipe);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *) arg;
|
||||
@ -2442,10 +2556,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
u32 flip_mask =
|
||||
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
|
||||
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
|
||||
u32 flip[2] = {
|
||||
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
|
||||
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
|
||||
};
|
||||
int pipe, ret = IRQ_NONE;
|
||||
|
||||
atomic_inc(&dev_priv->irq_received);
|
||||
@ -2507,14 +2617,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
int plane = pipe;
|
||||
if (IS_MOBILE(dev))
|
||||
plane = !plane;
|
||||
|
||||
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
|
||||
drm_handle_vblank(dev, pipe)) {
|
||||
if (iir & flip[plane]) {
|
||||
intel_prepare_page_flip(dev, plane);
|
||||
intel_finish_page_flip(dev, pipe);
|
||||
flip_mask &= ~flip[plane];
|
||||
}
|
||||
}
|
||||
i915_handle_vblank(dev, plane, pipe, iir))
|
||||
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
|
||||
|
||||
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
|
||||
blc_event = true;
|
||||
@ -2603,13 +2709,13 @@ static int i965_irq_postinstall(struct drm_device *dev)
|
||||
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
|
||||
|
||||
enable_mask = ~dev_priv->irq_mask;
|
||||
enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
|
||||
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
|
||||
enable_mask |= I915_USER_INTERRUPT;
|
||||
|
||||
if (IS_G4X(dev))
|
||||
enable_mask |= I915_BSD_USER_INTERRUPT;
|
||||
|
||||
dev_priv->pipestat[0] = 0;
|
||||
dev_priv->pipestat[1] = 0;
|
||||
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
|
||||
|
||||
/*
|
||||
@ -2689,6 +2795,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
||||
unsigned long irqflags;
|
||||
int irq_received;
|
||||
int ret = IRQ_NONE, pipe;
|
||||
u32 flip_mask =
|
||||
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
|
||||
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
|
||||
|
||||
atomic_inc(&dev_priv->irq_received);
|
||||
|
||||
@ -2697,7 +2806,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
||||
for (;;) {
|
||||
bool blc_event = false;
|
||||
|
||||
irq_received = iir != 0;
|
||||
irq_received = (iir & ~flip_mask) != 0;
|
||||
|
||||
/* Can't rely on pipestat interrupt bit in iir as it might
|
||||
* have been cleared after the pipestat interrupt was received.
|
||||
@ -2744,7 +2853,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
||||
I915_READ(PORT_HOTPLUG_STAT);
|
||||
}
|
||||
|
||||
I915_WRITE(IIR, iir);
|
||||
I915_WRITE(IIR, iir & ~flip_mask);
|
||||
new_iir = I915_READ(IIR); /* Flush posted writes */
|
||||
|
||||
if (iir & I915_USER_INTERRUPT)
|
||||
@ -2752,18 +2861,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
||||
if (iir & I915_BSD_USER_INTERRUPT)
|
||||
notify_ring(dev, &dev_priv->ring[VCS]);
|
||||
|
||||
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
|
||||
intel_prepare_page_flip(dev, 0);
|
||||
|
||||
if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
|
||||
intel_prepare_page_flip(dev, 1);
|
||||
|
||||
for_each_pipe(pipe) {
|
||||
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
|
||||
drm_handle_vblank(dev, pipe)) {
|
||||
i915_pageflip_stall_check(dev, pipe);
|
||||
intel_finish_page_flip(dev, pipe);
|
||||
}
|
||||
i915_handle_vblank(dev, pipe, pipe, iir))
|
||||
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
|
||||
|
||||
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
|
||||
blc_event = true;
|
||||
|
@ -121,6 +121,7 @@
|
||||
|
||||
#define GAM_ECOCHK 0x4090
|
||||
#define ECOCHK_SNB_BIT (1<<10)
|
||||
#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6)
|
||||
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
|
||||
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
|
||||
|
||||
@ -522,6 +523,9 @@
|
||||
#define GEN7_ERR_INT 0x44040
|
||||
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
|
||||
|
||||
#define FPGA_DBG 0x42300
|
||||
#define FPGA_DBG_RM_NOCLAIM (1<<31)
|
||||
|
||||
#define DERRMR 0x44050
|
||||
|
||||
/* GM45+ chicken bits -- debug workaround bits that may be required
|
||||
@ -591,6 +595,7 @@
|
||||
#define I915_USER_INTERRUPT (1<<1)
|
||||
#define I915_ASLE_INTERRUPT (1<<0)
|
||||
#define I915_BSD_USER_INTERRUPT (1<<25)
|
||||
#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
|
||||
#define EIR 0x020b0
|
||||
#define EMR 0x020b4
|
||||
#define ESR 0x020b8
|
||||
@ -1676,42 +1681,63 @@
|
||||
#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
|
||||
#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
|
||||
|
||||
/* SDVO port control */
|
||||
#define SDVOB 0x61140
|
||||
#define SDVOC 0x61160
|
||||
#define SDVO_ENABLE (1 << 31)
|
||||
#define SDVO_PIPE_B_SELECT (1 << 30)
|
||||
#define SDVO_STALL_SELECT (1 << 29)
|
||||
#define SDVO_INTERRUPT_ENABLE (1 << 26)
|
||||
/* SDVO and HDMI port control.
|
||||
* The same register may be used for SDVO or HDMI */
|
||||
#define GEN3_SDVOB 0x61140
|
||||
#define GEN3_SDVOC 0x61160
|
||||
#define GEN4_HDMIB GEN3_SDVOB
|
||||
#define GEN4_HDMIC GEN3_SDVOC
|
||||
#define PCH_SDVOB 0xe1140
|
||||
#define PCH_HDMIB PCH_SDVOB
|
||||
#define PCH_HDMIC 0xe1150
|
||||
#define PCH_HDMID 0xe1160
|
||||
|
||||
/* Gen 3 SDVO bits: */
|
||||
#define SDVO_ENABLE (1 << 31)
|
||||
#define SDVO_PIPE_SEL(pipe) ((pipe) << 30)
|
||||
#define SDVO_PIPE_SEL_MASK (1 << 30)
|
||||
#define SDVO_PIPE_B_SELECT (1 << 30)
|
||||
#define SDVO_STALL_SELECT (1 << 29)
|
||||
#define SDVO_INTERRUPT_ENABLE (1 << 26)
|
||||
/**
|
||||
* 915G/GM SDVO pixel multiplier.
|
||||
*
|
||||
* Programmed value is multiplier - 1, up to 5x.
|
||||
*
|
||||
* \sa DPLL_MD_UDI_MULTIPLIER_MASK
|
||||
*/
|
||||
#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
|
||||
#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
|
||||
#define SDVO_PORT_MULTIPLY_SHIFT 23
|
||||
#define SDVO_PHASE_SELECT_MASK (15 << 19)
|
||||
#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
|
||||
#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
|
||||
#define SDVOC_GANG_MODE (1 << 16)
|
||||
#define SDVO_ENCODING_SDVO (0x0 << 10)
|
||||
#define SDVO_ENCODING_HDMI (0x2 << 10)
|
||||
/** Requird for HDMI operation */
|
||||
#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
|
||||
#define SDVO_COLOR_RANGE_16_235 (1 << 8)
|
||||
#define SDVO_BORDER_ENABLE (1 << 7)
|
||||
#define SDVO_AUDIO_ENABLE (1 << 6)
|
||||
/** New with 965, default is to be set */
|
||||
#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
|
||||
/** New with 965, default is to be set */
|
||||
#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
|
||||
#define SDVOB_PCIE_CONCURRENCY (1 << 3)
|
||||
#define SDVO_DETECTED (1 << 2)
|
||||
#define SDVO_PHASE_SELECT_MASK (15 << 19)
|
||||
#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
|
||||
#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
|
||||
#define SDVOC_GANG_MODE (1 << 16) /* Port C only */
|
||||
#define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */
|
||||
#define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */
|
||||
#define SDVO_DETECTED (1 << 2)
|
||||
/* Bits to be preserved when writing */
|
||||
#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
|
||||
#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
|
||||
#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
|
||||
SDVO_INTERRUPT_ENABLE)
|
||||
#define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
|
||||
|
||||
/* Gen 4 SDVO/HDMI bits: */
|
||||
#define SDVO_COLOR_FORMAT_8bpc (0 << 26)
|
||||
#define SDVO_ENCODING_SDVO (0 << 10)
|
||||
#define SDVO_ENCODING_HDMI (2 << 10)
|
||||
#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
|
||||
#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
|
||||
#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
|
||||
#define SDVO_AUDIO_ENABLE (1 << 6)
|
||||
/* VSYNC/HSYNC bits new with 965, default is to be set */
|
||||
#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
|
||||
#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
|
||||
|
||||
/* Gen 5 (IBX) SDVO/HDMI bits: */
|
||||
#define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */
|
||||
#define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */
|
||||
|
||||
/* Gen 6 (CPT) SDVO/HDMI bits: */
|
||||
#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29)
|
||||
#define SDVO_PIPE_SEL_MASK_CPT (3 << 29)
|
||||
|
||||
|
||||
/* DVO port control */
|
||||
#define DVOA 0x61120
|
||||
@ -1898,7 +1924,7 @@
|
||||
#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
|
||||
|
||||
/* Backlight control */
|
||||
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
|
||||
#define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
|
||||
#define BLM_PWM_ENABLE (1 << 31)
|
||||
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
|
||||
#define BLM_PIPE_SELECT (1 << 29)
|
||||
@ -1917,7 +1943,7 @@
|
||||
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
|
||||
#define BLM_PHASE_IN_INCR_SHIFT (0)
|
||||
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
|
||||
#define BLC_PWM_CTL 0x61254
|
||||
#define BLC_PWM_CTL (dev_priv->info->display_mmio_offset + 0x61254)
|
||||
/*
|
||||
* This is the most significant 15 bits of the number of backlight cycles in a
|
||||
* complete cycle of the modulated backlight control.
|
||||
@ -1939,7 +1965,7 @@
|
||||
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
|
||||
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
|
||||
|
||||
#define BLC_HIST_CTL 0x61260
|
||||
#define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260)
|
||||
|
||||
/* New registers for PCH-split platforms. Safe where new bits show up, the
|
||||
* register layout machtes with gen4 BLC_PWM_CTL[12]. */
|
||||
@ -2776,6 +2802,8 @@
|
||||
#define DSPFW_HPLL_CURSOR_SHIFT 16
|
||||
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
|
||||
#define DSPFW_HPLL_SR_MASK (0x1ff)
|
||||
#define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070)
|
||||
#define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c)
|
||||
|
||||
/* drain latency register values*/
|
||||
#define DRAIN_LATENCY_PRECISION_32 32
|
||||
@ -3754,14 +3782,16 @@
|
||||
#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
|
||||
#define HSW_VIDEO_DIP_GCP_B 0x61210
|
||||
|
||||
#define HSW_TVIDEO_DIP_CTL(pipe) \
|
||||
_PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
|
||||
#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
|
||||
_PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
|
||||
#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
|
||||
_PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
|
||||
#define HSW_TVIDEO_DIP_GCP(pipe) \
|
||||
_PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
|
||||
#define HSW_TVIDEO_DIP_CTL(trans) \
|
||||
_TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
|
||||
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
|
||||
_TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
|
||||
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
|
||||
_TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
|
||||
#define HSW_TVIDEO_DIP_GCP(trans) \
|
||||
_TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
|
||||
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
|
||||
_TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
|
||||
|
||||
#define _TRANS_HTOTAL_B 0xe1000
|
||||
#define _TRANS_HBLANK_B 0xe1004
|
||||
@ -3976,34 +4006,6 @@
|
||||
#define FDI_PLL_CTL_1 0xfe000
|
||||
#define FDI_PLL_CTL_2 0xfe004
|
||||
|
||||
/* or SDVOB */
|
||||
#define HDMIB 0xe1140
|
||||
#define PORT_ENABLE (1 << 31)
|
||||
#define TRANSCODER(pipe) ((pipe) << 30)
|
||||
#define TRANSCODER_CPT(pipe) ((pipe) << 29)
|
||||
#define TRANSCODER_MASK (1 << 30)
|
||||
#define TRANSCODER_MASK_CPT (3 << 29)
|
||||
#define COLOR_FORMAT_8bpc (0)
|
||||
#define COLOR_FORMAT_12bpc (3 << 26)
|
||||
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
|
||||
#define SDVO_ENCODING (0)
|
||||
#define TMDS_ENCODING (2 << 10)
|
||||
#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
|
||||
/* CPT */
|
||||
#define HDMI_MODE_SELECT (1 << 9)
|
||||
#define DVI_MODE_SELECT (0)
|
||||
#define SDVOB_BORDER_ENABLE (1 << 7)
|
||||
#define AUDIO_ENABLE (1 << 6)
|
||||
#define VSYNC_ACTIVE_HIGH (1 << 4)
|
||||
#define HSYNC_ACTIVE_HIGH (1 << 3)
|
||||
#define PORT_DETECTED (1 << 2)
|
||||
|
||||
/* PCH SDVOB multiplex with HDMIB */
|
||||
#define PCH_SDVOB HDMIB
|
||||
|
||||
#define HDMIC 0xe1150
|
||||
#define HDMID 0xe1160
|
||||
|
||||
#define PCH_LVDS 0xe1180
|
||||
#define LVDS_DETECTED (1 << 1)
|
||||
|
||||
@ -4149,8 +4151,12 @@
|
||||
#define FORCEWAKE 0xA18C
|
||||
#define FORCEWAKE_VLV 0x1300b0
|
||||
#define FORCEWAKE_ACK_VLV 0x1300b4
|
||||
#define FORCEWAKE_MEDIA_VLV 0x1300b8
|
||||
#define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc
|
||||
#define FORCEWAKE_ACK_HSW 0x130044
|
||||
#define FORCEWAKE_ACK 0x130090
|
||||
#define VLV_GTLC_WAKE_CTRL 0x130090
|
||||
#define VLV_GTLC_PW_STATUS 0x130094
|
||||
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
|
||||
#define FORCEWAKE_KERNEL 0x1
|
||||
#define FORCEWAKE_USER 0x2
|
||||
|
@ -209,7 +209,8 @@ static void i915_save_display(struct drm_device *dev)
|
||||
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
|
||||
dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
|
||||
dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
|
||||
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
|
||||
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
|
||||
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
|
||||
} else {
|
||||
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
|
||||
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
|
||||
@ -255,6 +256,7 @@ static void i915_save_display(struct drm_device *dev)
|
||||
static void i915_restore_display(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 mask = 0xffffffff;
|
||||
|
||||
/* Display arbitration */
|
||||
if (INTEL_INFO(dev)->gen <= 4)
|
||||
@ -267,10 +269,13 @@ static void i915_restore_display(struct drm_device *dev)
|
||||
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
|
||||
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
|
||||
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
|
||||
} else if (IS_MOBILE(dev) && !IS_I830(dev))
|
||||
I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
mask = ~LVDS_PORT_EN;
|
||||
|
||||
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
|
||||
I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
|
||||
else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
|
||||
I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
|
||||
|
||||
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
|
||||
I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
|
||||
|
@ -49,7 +49,7 @@ static ssize_t
|
||||
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
|
||||
return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
|
||||
return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
@ -57,7 +57,7 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
|
||||
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
|
||||
return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
@ -65,7 +65,7 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
|
||||
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
|
||||
return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
@ -73,7 +73,7 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
|
||||
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
|
||||
return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
|
||||
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
|
||||
@ -215,7 +215,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
|
||||
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d", ret);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
|
||||
}
|
||||
|
||||
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
|
||||
@ -229,7 +229,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
|
||||
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d", ret);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
|
||||
}
|
||||
|
||||
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
|
||||
@ -280,7 +280,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
|
||||
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
|
||||
mutex_unlock(&dev_priv->rps.hw_lock);
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "%d", ret);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
|
||||
}
|
||||
|
||||
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
|
||||
@ -355,7 +355,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
|
||||
} else {
|
||||
BUG();
|
||||
}
|
||||
return snprintf(buf, PAGE_SIZE, "%d", val);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", val);
|
||||
}
|
||||
|
||||
static const struct attribute *gen6_attrs[] = {
|
||||
|
@ -1341,15 +1341,15 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t tmp;
|
||||
|
||||
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
|
||||
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
|
||||
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
|
||||
|
||||
if (type == INTEL_OUTPUT_EDP) {
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
|
||||
ironlake_edp_backlight_off(intel_dp);
|
||||
}
|
||||
|
||||
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
|
||||
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
|
||||
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
|
||||
}
|
||||
|
||||
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
|
||||
@ -1537,9 +1537,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
|
||||
intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
|
||||
DDI_BUF_PORT_REVERSAL;
|
||||
if (hdmi_connector)
|
||||
intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
|
||||
else
|
||||
intel_dig_port->hdmi.sdvox_reg = 0;
|
||||
intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
|
||||
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
|
||||
|
||||
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
|
||||
|
@ -71,8 +71,24 @@ typedef struct intel_limit intel_limit_t;
struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
int, int, intel_clock_t *, intel_clock_t *);
/**
* find_pll() - Find the best values for the PLL
* @limit: limits for the PLL
* @crtc: current CRTC
* @target: target frequency in kHz
* @refclk: reference clock frequency in kHz
* @match_clock: if provided, @best_clock P divider must
* match the P divider from @match_clock
* used for LVDS downclocking
* @best_clock: best PLL values found
*
* Returns true on success, false on failure.
*/
bool (*find_pll)(const intel_limit_t *limit,
struct drm_crtc *crtc,
int target, int refclk,
intel_clock_t *match_clock,
intel_clock_t *best_clock);
};

/* FDI */
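
The kerneldoc added above pins down the vfunc's contract. A hypothetical call site inside intel_display.c, just to illustrate the parameters (the target clock value and passing NULL for match_clock are made up for the example; the kerneldoc's "if provided" wording is what suggests NULL is acceptable):

    /* Ask the platform-specific hook for dividers hitting a 148500 kHz dot clock. */
    static bool pick_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
                         int refclk, intel_clock_t *clock)
    {
            return limit->find_pll(limit, crtc, 148500, refclk,
                                   NULL /* no downclock to match */, clock);
    }
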
@ -471,7 +487,6 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
|
||||
|
||||
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
|
||||
if (intel_is_dual_link_lvds(dev)) {
|
||||
/* LVDS dual channel */
|
||||
if (refclk == 100000)
|
||||
limit = &intel_limits_ironlake_dual_lvds_100m;
|
||||
else
|
||||
@ -498,10 +513,8 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
|
||||
|
||||
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
|
||||
if (intel_is_dual_link_lvds(dev))
|
||||
/* LVDS with dual channel */
|
||||
limit = &intel_limits_g4x_dual_channel_lvds;
|
||||
else
|
||||
/* LVDS with dual channel */
|
||||
limit = &intel_limits_g4x_single_channel_lvds;
|
||||
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
|
||||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
|
||||
@ -1254,7 +1267,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
|
||||
int cur_pipe;
|
||||
|
||||
/* Planes are fixed to pipes on ILK+ */
|
||||
if (HAS_PCH_SPLIT(dev_priv->dev)) {
|
||||
if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) {
|
||||
reg = DSPCNTR(pipe);
|
||||
val = I915_READ(reg);
|
||||
WARN((val & DISPLAY_PLANE_ENABLE),
|
||||
@ -1327,14 +1340,14 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
|
||||
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, u32 val)
|
||||
{
|
||||
if ((val & PORT_ENABLE) == 0)
|
||||
if ((val & SDVO_ENABLE) == 0)
|
||||
return false;
|
||||
|
||||
if (HAS_PCH_CPT(dev_priv->dev)) {
|
||||
if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
|
||||
if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
|
||||
return false;
|
||||
} else {
|
||||
if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
|
||||
if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@ -1392,7 +1405,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
|
||||
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
|
||||
reg, pipe_name(pipe));
|
||||
|
||||
WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
|
||||
WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
|
||||
&& (val & SDVO_PIPE_B_SELECT),
|
||||
"IBX PCH hdmi port still using transcoder B\n");
|
||||
}
|
||||
@ -1419,9 +1432,9 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
|
||||
"PCH LVDS enabled on transcoder %c, should be disabled\n",
|
||||
pipe_name(pipe));
|
||||
|
||||
assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
|
||||
assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
|
||||
assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
|
||||
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
|
||||
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
|
||||
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2229,6 +2242,44 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
|
||||
return dev_priv->display.update_plane(crtc, fb, x, y);
|
||||
}
|
||||
|
||||
void intel_display_handle_reset(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
/*
|
||||
* Flips in the rings have been nuked by the reset,
|
||||
* so complete all pending flips so that user space
|
||||
* will get its events and not get stuck.
|
||||
*
|
||||
* Also update the base address of all primary
|
||||
* planes to the the last fb to make sure we're
|
||||
* showing the correct fb after a reset.
|
||||
*
|
||||
* Need to make two loops over the crtcs so that we
|
||||
* don't try to grab a crtc mutex before the
|
||||
* pending_flip_queue really got woken up.
|
||||
*/
|
||||
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
enum plane plane = intel_crtc->plane;
|
||||
|
||||
intel_prepare_page_flip(dev, plane);
|
||||
intel_finish_page_flip_plane(dev, plane);
|
||||
}
|
||||
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
mutex_lock(&crtc->mutex);
|
||||
if (intel_crtc->active)
|
||||
dev_priv->display.update_plane(crtc, crtc->fb,
|
||||
crtc->x, crtc->y);
|
||||
mutex_unlock(&crtc->mutex);
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
intel_finish_fb(struct drm_framebuffer *old_fb)
|
||||
{
|
||||
@ -2295,10 +2346,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
return 0;
|
||||
}
|
||||
|
||||
if(intel_crtc->plane > dev_priv->num_pipe) {
|
||||
if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
|
||||
DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
|
||||
intel_crtc->plane,
|
||||
dev_priv->num_pipe);
|
||||
INTEL_INFO(dev)->num_pipes);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -2312,9 +2363,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (crtc->fb)
|
||||
intel_finish_fb(crtc->fb);
|
||||
|
||||
ret = dev_priv->display.update_plane(crtc, fb, x, y);
|
||||
if (ret) {
|
||||
intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
|
||||
@ -4512,11 +4560,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
|
||||
dpll |= PLL_P2_DIVIDE_BY_4;
|
||||
}
|
||||
|
||||
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
|
||||
/* XXX: just matching BIOS for now */
|
||||
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
|
||||
dpll |= 3;
|
||||
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
|
||||
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
|
||||
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
|
||||
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
|
||||
else
|
||||
@ -4699,10 +4743,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
|
||||
/* Set up the display plane register */
|
||||
dspcntr = DISPPLANE_GAMMA_ENABLE;
|
||||
|
||||
if (pipe == 0)
|
||||
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
|
||||
else
|
||||
dspcntr |= DISPPLANE_SEL_PIPE_B;
|
||||
if (!IS_VALLEYVIEW(dev)) {
|
||||
if (pipe == 0)
|
||||
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
|
||||
else
|
||||
dspcntr |= DISPPLANE_SEL_PIPE_B;
|
||||
}
|
||||
|
||||
if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
|
||||
/* Enable pixel doubling when the dot clock is > 90% of the (display)
|
||||
@ -5344,7 +5390,7 @@ static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
|
||||
return false;
|
||||
}
|
||||
|
||||
if (dev_priv->num_pipe == 2)
|
||||
if (INTEL_INFO(dev)->num_pipes == 2)
|
||||
return true;
|
||||
|
||||
switch (intel_crtc->pipe) {
|
||||
@ -6436,20 +6482,6 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
|
||||
intel_crtc_load_lut(crtc);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a pipe with a simple mode set on it for doing load-based monitor
|
||||
* detection.
|
||||
*
|
||||
* It will be up to the load-detect code to adjust the pipe as appropriate for
|
||||
* its requirements. The pipe will be connected to no other encoders.
|
||||
*
|
||||
* Currently this code will only succeed if there is a pipe with no encoders
|
||||
* configured for it. In the future, it could choose to temporarily disable
|
||||
* some outputs to free up a pipe for its use.
|
||||
*
|
||||
* \return crtc, or NULL if no pipes are available.
|
||||
*/
|
||||
|
||||
/* VESA 640x480x72Hz mode to set on the pipe */
|
||||
static struct drm_display_mode load_detect_mode = {
|
||||
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
|
||||
@ -6954,7 +6986,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_unpin_work *work;
|
||||
struct drm_i915_gem_object *obj;
|
||||
unsigned long flags;
|
||||
|
||||
/* Ignore early vblank irqs */
|
||||
@ -6984,8 +7015,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
|
||||
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
|
||||
obj = work->old_fb_obj;
|
||||
|
||||
wake_up_all(&dev_priv->pending_flip_queue);
|
||||
|
||||
queue_work(dev_priv->wq, &work->work);
|
||||
@ -8145,6 +8174,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
|
||||
goto fail;
|
||||
}
|
||||
} else if (config->fb_changed) {
|
||||
intel_crtc_wait_for_pending_flips(set->crtc);
|
||||
|
||||
ret = intel_pipe_set_base(set->crtc,
|
||||
set->x, set->y, set->fb);
|
||||
}
|
||||
@ -8343,20 +8374,20 @@ static void intel_setup_outputs(struct drm_device *dev)
|
||||
if (has_edp_a(dev))
|
||||
intel_dp_init(dev, DP_A, PORT_A);
|
||||
|
||||
if (I915_READ(HDMIB) & PORT_DETECTED) {
|
||||
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
|
||||
/* PCH SDVOB multiplex with HDMIB */
|
||||
found = intel_sdvo_init(dev, PCH_SDVOB, true);
|
||||
if (!found)
|
||||
intel_hdmi_init(dev, HDMIB, PORT_B);
|
||||
intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
|
||||
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
|
||||
intel_dp_init(dev, PCH_DP_B, PORT_B);
|
||||
}
|
||||
|
||||
if (I915_READ(HDMIC) & PORT_DETECTED)
|
||||
intel_hdmi_init(dev, HDMIC, PORT_C);
|
||||
if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
|
||||
intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
|
||||
|
||||
if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
|
||||
intel_hdmi_init(dev, HDMID, PORT_D);
|
||||
if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
|
||||
intel_hdmi_init(dev, PCH_HDMID, PORT_D);
|
||||
|
||||
if (I915_READ(PCH_DP_C) & DP_DETECTED)
|
||||
intel_dp_init(dev, PCH_DP_C, PORT_C);
|
||||
@ -8368,24 +8399,21 @@ static void intel_setup_outputs(struct drm_device *dev)
|
||||
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
|
||||
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
|
||||
|
||||
if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) {
|
||||
intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B);
|
||||
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
|
||||
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
|
||||
PORT_B);
|
||||
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
|
||||
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
|
||||
}
|
||||
|
||||
if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
|
||||
intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
|
||||
|
||||
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
|
||||
bool found = false;
|
||||
|
||||
if (I915_READ(SDVOB) & SDVO_DETECTED) {
|
||||
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
|
||||
DRM_DEBUG_KMS("probing SDVOB\n");
|
||||
found = intel_sdvo_init(dev, SDVOB, true);
|
||||
found = intel_sdvo_init(dev, GEN3_SDVOB, true);
|
||||
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
|
||||
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
|
||||
intel_hdmi_init(dev, SDVOB, PORT_B);
|
||||
intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
|
||||
}
|
||||
|
||||
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
|
||||
@ -8396,16 +8424,16 @@ static void intel_setup_outputs(struct drm_device *dev)
|
||||
|
||||
/* Before G4X SDVOC doesn't have its own detect register */
|
||||
|
||||
if (I915_READ(SDVOB) & SDVO_DETECTED) {
|
||||
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
|
||||
DRM_DEBUG_KMS("probing SDVOC\n");
|
||||
found = intel_sdvo_init(dev, SDVOC, false);
|
||||
found = intel_sdvo_init(dev, GEN3_SDVOC, false);
|
||||
}
|
||||
|
||||
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
|
||||
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
|
||||
|
||||
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
|
||||
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
|
||||
intel_hdmi_init(dev, SDVOC, PORT_C);
|
||||
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
|
||||
}
|
||||
if (SUPPORTS_INTEGRATED_DP(dev)) {
|
||||
DRM_DEBUG_KMS("probing DP_C\n");
|
||||
@ -8572,7 +8600,6 @@ static void intel_init_display(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
/* We always want a DPMS function */
|
||||
if (HAS_DDI(dev)) {
|
||||
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
|
||||
dev_priv->display.crtc_enable = haswell_crtc_enable;
|
||||
@ -8859,9 +8886,10 @@ void intel_modeset_init(struct drm_device *dev)
|
||||
dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
|
||||
|
||||
DRM_DEBUG_KMS("%d display pipe%s available.\n",
|
||||
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
|
||||
INTEL_INFO(dev)->num_pipes,
|
||||
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
|
||||
|
||||
for (i = 0; i < dev_priv->num_pipe; i++) {
|
||||
for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
|
||||
intel_crtc_init(dev, i);
|
||||
ret = intel_plane_init(dev, i);
|
||||
if (ret)
|
||||
@ -8918,10 +8946,11 @@ static void intel_enable_pipe_a(struct drm_device *dev)
|
||||
static bool
|
||||
intel_check_plane_mapping(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 reg, val;
|
||||
|
||||
if (dev_priv->num_pipe == 1)
|
||||
if (INTEL_INFO(dev)->num_pipes == 1)
|
||||
return true;
|
||||
|
||||
reg = DSPCNTR(!crtc->plane);
|
||||
@ -9323,15 +9352,23 @@ intel_display_capture_error_state(struct drm_device *dev)
|
||||
for_each_pipe(i) {
|
||||
cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
|
||||
|
||||
error->cursor[i].control = I915_READ(CURCNTR(i));
|
||||
error->cursor[i].position = I915_READ(CURPOS(i));
|
||||
error->cursor[i].base = I915_READ(CURBASE(i));
|
||||
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
|
||||
error->cursor[i].control = I915_READ(CURCNTR(i));
|
||||
error->cursor[i].position = I915_READ(CURPOS(i));
|
||||
error->cursor[i].base = I915_READ(CURBASE(i));
|
||||
} else {
|
||||
error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
|
||||
error->cursor[i].position = I915_READ(CURPOS_IVB(i));
|
||||
error->cursor[i].base = I915_READ(CURBASE_IVB(i));
|
||||
}
|
||||
|
||||
error->plane[i].control = I915_READ(DSPCNTR(i));
|
||||
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
|
||||
error->plane[i].size = I915_READ(DSPSIZE(i));
|
||||
if (INTEL_INFO(dev)->gen <= 3)
|
||||
error->plane[i].size = I915_READ(DSPSIZE(i));
|
||||
error->plane[i].pos = I915_READ(DSPPOS(i));
|
||||
error->plane[i].addr = I915_READ(DSPADDR(i));
|
||||
if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
|
||||
error->plane[i].addr = I915_READ(DSPADDR(i));
|
||||
if (INTEL_INFO(dev)->gen >= 4) {
|
||||
error->plane[i].surface = I915_READ(DSPSURF(i));
|
||||
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
|
||||
@ -9355,10 +9392,9 @@ intel_display_print_error_state(struct seq_file *m,
|
||||
struct drm_device *dev,
|
||||
struct intel_display_error_state *error)
|
||||
{
|
||||
drm_i915_private_t *dev_priv = dev->dev_private;
|
||||
int i;
|
||||
|
||||
seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
|
||||
seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
|
||||
for_each_pipe(i) {
|
||||
seq_printf(m, "Pipe [%d]:\n", i);
|
||||
seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
|
||||
@ -9373,9 +9409,11 @@ intel_display_print_error_state(struct seq_file *m,
|
||||
seq_printf(m, "Plane [%d]:\n", i);
|
||||
seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
|
||||
seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
|
||||
seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
|
||||
if (INTEL_INFO(dev)->gen <= 3)
|
||||
seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
|
||||
seq_printf(m, " POS: %08x\n", error->plane[i].pos);
|
||||
seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
|
||||
if (!IS_HASWELL(dev))
|
||||
seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
|
||||
if (INTEL_INFO(dev)->gen >= 4) {
|
||||
seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
|
||||
seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
|
||||
|
@ -328,29 +328,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t ch_ctl = intel_dp->output_reg + 0x10;
|
||||
uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
|
||||
uint32_t status;
|
||||
bool done;
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
switch (intel_dig_port->port) {
|
||||
case PORT_A:
|
||||
ch_ctl = DPA_AUX_CH_CTL;
|
||||
break;
|
||||
case PORT_B:
|
||||
ch_ctl = PCH_DPB_AUX_CH_CTL;
|
||||
break;
|
||||
case PORT_C:
|
||||
ch_ctl = PCH_DPC_AUX_CH_CTL;
|
||||
break;
|
||||
case PORT_D:
|
||||
ch_ctl = PCH_DPD_AUX_CH_CTL;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
|
||||
if (has_aux_irq)
|
||||
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
|
||||
@ -370,11 +351,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
|
||||
uint8_t *send, int send_bytes,
|
||||
uint8_t *recv, int recv_size)
|
||||
{
|
||||
uint32_t output_reg = intel_dp->output_reg;
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
struct drm_device *dev = intel_dig_port->base.base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t ch_ctl = output_reg + 0x10;
|
||||
uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
|
||||
uint32_t ch_data = ch_ctl + 4;
|
||||
int i, ret, recv_bytes;
|
||||
uint32_t status;
|
||||
@ -388,29 +368,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
|
||||
*/
|
||||
pm_qos_update_request(&dev_priv->pm_qos, 0);
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
switch (intel_dig_port->port) {
|
||||
case PORT_A:
|
||||
ch_ctl = DPA_AUX_CH_CTL;
|
||||
ch_data = DPA_AUX_CH_DATA1;
|
||||
break;
|
||||
case PORT_B:
|
||||
ch_ctl = PCH_DPB_AUX_CH_CTL;
|
||||
ch_data = PCH_DPB_AUX_CH_DATA1;
|
||||
break;
|
||||
case PORT_C:
|
||||
ch_ctl = PCH_DPC_AUX_CH_CTL;
|
||||
ch_data = PCH_DPC_AUX_CH_DATA1;
|
||||
break;
|
||||
case PORT_D:
|
||||
ch_ctl = PCH_DPD_AUX_CH_CTL;
|
||||
ch_data = PCH_DPD_AUX_CH_DATA1;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
intel_dp_check_edp(intel_dp);
|
||||
/* The clock divider is based off the hrawclk,
|
||||
* and would like to run at 2MHz. So, take the
|
||||
@ -853,7 +810,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
|
||||
intel_link_compute_m_n(intel_crtc->bpp, lane_count,
|
||||
target_clock, adjusted_mode->clock, &m_n);
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
if (HAS_DDI(dev)) {
|
||||
I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
|
||||
TU_SIZE(m_n.tu) | m_n.gmch_m);
|
||||
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
|
||||
@ -1020,7 +977,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
||||
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
|
||||
}
|
||||
|
||||
if (is_cpu_edp(intel_dp))
|
||||
if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
|
||||
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
|
||||
}
|
||||
|
||||
@ -1384,7 +1341,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
|
||||
if (!(tmp & DP_PORT_EN))
|
||||
return false;
|
||||
|
||||
if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
|
||||
if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
|
||||
*pipe = PORT_TO_PIPE_CPT(tmp);
|
||||
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
|
||||
*pipe = PORT_TO_PIPE(tmp);
|
||||
@ -1548,7 +1505,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
|
||||
{
|
||||
struct drm_device *dev = intel_dp_to_dev(intel_dp);
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
if (HAS_DDI(dev)) {
|
||||
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
|
||||
case DP_TRAIN_VOLTAGE_SWING_400:
|
||||
return DP_TRAIN_PRE_EMPHASIS_9_5;
|
||||
@ -1756,7 +1713,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
|
||||
uint32_t signal_levels, mask;
|
||||
uint8_t train_set = intel_dp->train_set[0];
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
if (HAS_DDI(dev)) {
|
||||
signal_levels = intel_hsw_signal_levels(train_set);
|
||||
mask = DDI_BUF_EMP_MASK;
|
||||
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
|
||||
@ -1787,7 +1744,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
|
||||
int ret;
|
||||
uint32_t temp;
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
if (HAS_DDI(dev)) {
|
||||
temp = I915_READ(DP_TP_CTL(port));
|
||||
|
||||
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
|
||||
@ -2311,6 +2268,16 @@ g4x_dp_detect(struct intel_dp *intel_dp)
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
uint32_t bit;
|
||||
|
||||
/* Can't disconnect eDP, but you can close the lid... */
|
||||
if (is_edp(intel_dp)) {
|
||||
enum drm_connector_status status;
|
||||
|
||||
status = intel_panel_detect(dev);
|
||||
if (status == connector_status_unknown)
|
||||
status = connector_status_connected;
|
||||
return status;
|
||||
}
|
||||
|
||||
switch (intel_dig_port->port) {
|
||||
case PORT_B:
|
||||
bit = PORTB_HOTPLUG_LIVE_STATUS;
|
||||
@ -2844,6 +2811,25 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
else
|
||||
intel_connector->get_hw_state = intel_connector_get_hw_state;
|
||||
|
||||
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
|
||||
if (HAS_DDI(dev)) {
|
||||
switch (intel_dig_port->port) {
|
||||
case PORT_A:
|
||||
intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
|
||||
break;
|
||||
case PORT_B:
|
||||
intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
|
||||
break;
|
||||
case PORT_C:
|
||||
intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
|
||||
break;
|
||||
case PORT_D:
|
||||
intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
/* Set up the DDC bus. */
|
||||
switch (port) {
|
||||
|
@@ -347,7 +347,7 @@ struct dip_infoframe {
} __attribute__((packed));

struct intel_hdmi {
u32 sdvox_reg;
u32 hdmi_reg;
int ddc_bus;
uint32_t color_range;
bool color_range_auto;
@@ -366,6 +366,7 @@ struct intel_hdmi {

struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
@@ -443,7 +444,7 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)

extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int sdvox_reg, enum port port);
int hdmi_reg, enum port port);
extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
@@ -695,4 +696,6 @@ extern bool
intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);

extern void intel_display_handle_reset(struct drm_device *dev);

#endif /* __INTEL_DRV_H__ */

@@ -150,8 +150,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
info->screen_size = size;

// memset(info->screen_base, 0, size);

drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

@@ -227,7 +225,7 @@ int intel_fbdev_init(struct drm_device *dev)
ifbdev->helper.funcs = &intel_fb_helper_funcs;

ret = drm_fb_helper_init(dev, &ifbdev->helper,
dev_priv->num_pipe,
INTEL_INFO(dev)->num_pipes,
INTELFB_CONN_LIMIT);
if (ret) {
kfree(ifbdev);

@@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)

enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;

WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
}

@@ -120,13 +120,14 @@ static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
}
}

static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
enum transcoder cpu_transcoder)
{
switch (frame->type) {
case DIP_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(pipe);
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
case DIP_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(pipe);
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
return 0;
@@ -293,8 +294,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder);
u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->cpu_transcoder);
unsigned int i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(ctl_reg);

@@ -568,7 +569,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder);
u32 val = I915_READ(reg);

assert_hdmi_port_disabled(intel_hdmi);
@@ -597,40 +598,40 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
u32 hdmi_val;

sdvox = SDVO_ENCODING_HDMI;
hdmi_val = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev))
sdvox |= intel_hdmi->color_range;
hdmi_val |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;

if (intel_crtc->bpp > 24)
sdvox |= COLOR_FORMAT_12bpc;
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
sdvox |= COLOR_FORMAT_8bpc;
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;

/* Required on CPT */
if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
hdmi_val |= HDMI_MODE_SELECT_HDMI;

if (intel_hdmi->has_audio) {
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
sdvox |= SDVO_AUDIO_ENABLE;
sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
hdmi_val |= SDVO_AUDIO_ENABLE;
hdmi_val |= HDMI_MODE_SELECT_HDMI;
intel_write_eld(encoder, adjusted_mode);
}

if (HAS_PCH_CPT(dev))
sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == PIPE_B)
sdvox |= SDVO_PIPE_B_SELECT;
hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
else
hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);

I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
POSTING_READ(intel_hdmi->hdmi_reg);

intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
@@ -643,7 +644,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 tmp;

tmp = I915_READ(intel_hdmi->sdvox_reg);
tmp = I915_READ(intel_hdmi->hdmi_reg);

if (!(tmp & SDVO_ENABLE))
return false;
@@ -660,6 +661,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
u32 enable_bits = SDVO_ENABLE;
@@ -667,38 +669,32 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
if (intel_hdmi->has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;

temp = I915_READ(intel_hdmi->sdvox_reg);
temp = I915_READ(intel_hdmi->hdmi_reg);

/* HW workaround for IBX, we need to move the port to transcoder A
* before disabling it. */
if (HAS_PCH_IBX(dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;

/* Restore the transcoder select bit. */
if (pipe == PIPE_B)
enable_bits |= SDVO_PIPE_B_SELECT;
}
* before disabling it, so restore the transcoder select bit here. */
if (HAS_PCH_IBX(dev))
enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);

/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway which shows more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->sdvox_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
}

temp |= enable_bits;

I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);

/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
}

@@ -710,7 +706,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;

temp = I915_READ(intel_hdmi->sdvox_reg);
temp = I915_READ(intel_hdmi->hdmi_reg);

/* HW workaround for IBX, we need to move the port to transcoder A
* before disabling it. */
@@ -720,12 +716,12 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)

if (temp & SDVO_PIPE_B_SELECT) {
temp &= ~SDVO_PIPE_B_SELECT;
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);

/* Again we need to write this twice. */
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);

/* Transcoder selection bits only update
* effectively on vblank. */
@@ -740,21 +736,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
* we do this anyway which shows more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->sdvox_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
}

temp &= ~enable_bits;

I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);

/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
}

@@ -782,7 +778,7 @@ bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (intel_hdmi->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
else
intel_hdmi->color_range = 0;
}
@@ -916,7 +912,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_hdmi->color_range_auto = false;
intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
@@ -1008,13 +1004,13 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
BUG();
}

if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (IS_VALLEYVIEW(dev)) {
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
} else if (IS_HASWELL(dev)) {
} else if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (HAS_DDI(dev)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
} else if (HAS_PCH_IBX(dev)) {
@@ -1045,7 +1041,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
}
}

void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
@@ -1078,7 +1074,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
intel_encoder->cloneable = false;

intel_dig_port->port = port;
intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = 0;

intel_hdmi_init_connector(intel_dig_port, intel_connector);

@@ -1019,12 +1019,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
{
/* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it. */
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
return true;

/* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm */
return IS_MOBILE(dev) && !IS_I830(dev);
if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
return true;

return false;
}

/**

@@ -335,7 +335,7 @@ void intel_panel_enable_backlight(struct drm_device *dev,
if (tmp & BLM_PWM_ENABLE)
goto set_level;

if (dev_priv->num_pipe == 3)
if (INTEL_INFO(dev)->num_pipes == 3)
tmp &= ~BLM_PIPE_SELECT_IVB;
else
tmp &= ~BLM_PIPE_SELECT;

@@ -2631,9 +2631,11 @@ static void gen6_enable_rps(struct drm_device *dev)
if (!ret) {
pcu_mbox = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
if (ret && pcu_mbox & (1<<31)) { /* OC supported */
if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max from %dMHz to %dMHz\n",
(dev_priv->rps.max_delay & 0xff) * 50,
(pcu_mbox & 0xff) * 50);
dev_priv->rps.max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
} else {
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
@@ -2821,7 +2823,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
ret = intel_ring_idle(ring);
dev_priv->mm.interruptible = was_interruptible;
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
DRM_ERROR("failed to enable ironlake power savings\n");
ironlake_teardown_rc6(dev);
return;
}
@@ -3768,6 +3770,9 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);

/* WaSwitchSolVfFArbitrationPriority */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

/* XXX: This is a workaround for early silicon revisions and should be
* removed later.
*/
@@ -3899,8 +3904,10 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);

/* WaDisablePSDDualDispatchEnable */
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
_MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
@@ -3985,7 +3992,16 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
* Disable clock gating on th GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);

/* Conservative clock gating settings for now */
I915_WRITE(0x9400, 0xffffffff);
I915_WRITE(0x9404, 0xffffffff);
I915_WRITE(0x9408, 0xffffffff);
I915_WRITE(0x940c, 0xffffffff);
I915_WRITE(0x9410, 0xffffffff);
I915_WRITE(0x9414, 0xffffffff);
I915_WRITE(0x9418, 0xffffffff);
}

static void g4x_init_clock_gating(struct drm_device *dev)
@@ -4076,7 +4092,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
bool is_enabled, enable_requested;
uint32_t tmp;

if (!IS_HASWELL(dev))
if (!HAS_POWER_WELL(dev))
return;

if (!i915_disable_power_well && !enable)
@@ -4114,7 +4130,7 @@ void intel_init_power_well(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (!IS_HASWELL(dev))
if (!HAS_POWER_WELL(dev))
return;

/* For now, we need the power well to be always enabled. */
@@ -4274,21 +4290,14 @@ static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;

if (IS_HASWELL(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_ACK;

if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
I915_WRITE_NOTRACE(FORCEWAKE, 1);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */

if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

@@ -4311,7 +4320,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
else
forcewake_ack = FORCEWAKE_MT_ACK;

if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

@@ -4319,7 +4328,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
/* something from same cacheline, but !FORCEWAKE_MT */
POSTING_READ(ECOBUS);

if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

@@ -4409,15 +4418,22 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)

static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");

if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

__gen6_gt_wait_for_thread_c0(dev_priv);
}
@@ -4425,8 +4441,9 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_VLV */
POSTING_READ(FORCEWAKE_ACK_VLV);
I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}

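The Valleyview force-wake change above raises the kernel wake bit on both the GT well (FORCEWAKE_VLV) and the new media well (FORCEWAKE_MEDIA_VLV), then waits for each well to ack separately before registers are touched. Below is a standalone sketch of that two-ack handshake in ordinary userspace C, with plain variables standing in for the MMIO registers and a bounded poll loop; every name in it is invented for illustration, only the pattern comes from the diff.

#include <stdbool.h>
#include <stdio.h>

/* Toy model: request a wake on two "wells", then poll each ack with a
 * bounded retry count. Real hardware is reached through MMIO reads and
 * writes; here the registers are just variables. */
static unsigned int fake_gt_ack, fake_media_ack;

static void fake_hw_tick(void)
{
	/* Pretend the hardware acks one poll after the request. */
	fake_gt_ack = 1;
	fake_media_ack = 1;
}

static bool wait_for_ack(unsigned int *ack, int retries)
{
	while (retries--) {
		if (*ack & 1)
			return true;
		fake_hw_tick();
	}
	return false;
}

int main(void)
{
	/* "Write" the wake request to both wells ... */
	fake_gt_ack = fake_media_ack = 0;

	/* ... and require an ack from each one before proceeding. */
	if (!wait_for_ack(&fake_gt_ack, 5))
		fprintf(stderr, "GT well did not ack\n");
	if (!wait_for_ack(&fake_media_ack, 5))
		fprintf(stderr, "media well did not ack\n");

	puts("both wells awake");
	return 0;
}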
@@ -246,11 +246,11 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
return;
}

if (intel_sdvo->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
bval = I915_READ(SDVOB);
}
if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
cval = I915_READ(GEN3_SDVOC);
else
bval = I915_READ(GEN3_SDVOB);

/*
* Write the registers twice for luck. Sometimes,
* writing them only once doesn't appear to 'stick'.
@@ -258,10 +258,10 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
*/
for (i = 0; i < 2; i++)
{
I915_WRITE(SDVOB, bval);
I915_READ(SDVOB);
I915_WRITE(SDVOC, cval);
I915_READ(SDVOC);
I915_WRITE(GEN3_SDVOB, bval);
I915_READ(GEN3_SDVOB);
I915_WRITE(GEN3_SDVOC, cval);
I915_READ(GEN3_SDVOC);
}
}

@@ -451,7 +451,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
int i, ret = true;

/* Would be simpler to allocate both in one go ? */
buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
if (!buf)
return false;

@@ -965,6 +965,8 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
}

avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);

intel_dip_infoframe_csum(&avi_if);

/* sdvo spec says that the ecc is handled by the hw, and it looks like
@@ -1076,9 +1078,11 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,

if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
/* FIXME: This bit is only valid when using TMDS encoding and 8
* bit per color mode. */
if (intel_sdvo->has_hdmi_monitor &&
drm_match_cea_mode(adjusted_mode) > 1)
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
else
intel_sdvo->color_range = 0;
}
@@ -1182,10 +1186,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
} else {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
switch (intel_sdvo->sdvo_reg) {
case SDVOB:
case GEN3_SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
break;
case SDVOC:
case GEN3_SDVOC:
sdvox &= SDVOC_PRESERVE_MASK;
break;
}
@@ -1193,9 +1197,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}

if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
else
sdvox |= TRANSCODER(intel_crtc->pipe);
sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);

if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1305,15 +1309,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0) {
/* HW workaround for IBX, we need to move the port
* to transcoder A before disabling it. */
if (HAS_PCH_IBX(dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;

/* Restore the transcoder select bit. */
if (pipe == PIPE_B)
temp |= SDVO_PIPE_B_SELECT;
}
* to transcoder A before disabling it, so restore it here. */
if (HAS_PCH_IBX(dev))
temp |= SDVO_PIPE_SEL(intel_crtc->pipe);

intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
}
@@ -1932,7 +1930,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_sdvo->color_range_auto = false;
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
/* FIXME: this bit is only valid when using TMDS
* encoding and 8 bit per color mode. */
intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;

@@ -1645,6 +1645,11 @@ static int do_register_framebuffer(struct fb_info *fb_info)
if (!fb_info->modelist.prev || !fb_info->modelist.next)
INIT_LIST_HEAD(&fb_info->modelist);

if (fb_info->skip_vt_switch)
pm_vt_switch_required(fb_info->dev, false);
else
pm_vt_switch_required(fb_info->dev, true);

fb_var_to_videomode(&mode, &fb_info->var);
fb_add_videomode(&mode, &fb_info->modelist);
registered_fb[i] = fb_info;
@@ -1679,6 +1684,8 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
if (ret)
return -EINVAL;

pm_vt_switch_unregister(fb_info->dev);

unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))

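With the hunk above, framebuffer registration now reports each device's VT-switch requirement to the PM core via pm_vt_switch_required(), keyed off the new fb_info->skip_vt_switch flag. The following is only a minimal sketch of how a driver that restores its own display state on resume might opt in; the driver and function names are hypothetical, and only the flag and the registration call come from this series.

#include <linux/fb.h>

/* Hypothetical fbdev registration path. The driver restores video state
 * itself on resume, so it asks for switchless suspend/resume by setting
 * the flag before registering; do_register_framebuffer() then calls
 * pm_vt_switch_required(dev, false) on its behalf. */
static int example_fb_register(struct fb_info *info)
{
	info->skip_vt_switch = true;	/* no VT switch needed for this device */

	return register_framebuffer(info);
}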
@@ -501,6 +501,8 @@ struct fb_info {
resource_size_t size;
} ranges[0];
} *apertures;

bool skip_vt_switch; /* no VT switch on suspend/resume required */
};

static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {

@@ -34,6 +34,19 @@
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);

struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
extern void pm_vt_switch_required(struct device *dev, bool required);
extern void pm_vt_switch_unregister(struct device *dev);
#else
static inline void pm_vt_switch_required(struct device *dev, bool required)
{
}
static inline void pm_vt_switch_unregister(struct device *dev)
{
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */

/*
* Device power management
*/

@@ -4,6 +4,7 @@
* Originally from swsusp.
*/

#include <linux/console.h>
#include <linux/vt_kern.h>
#include <linux/kbd_kern.h>
#include <linux/vt.h>
@@ -14,8 +15,120 @@

static int orig_fgconsole, orig_kmsg;

static DEFINE_MUTEX(vt_switch_mutex);

struct pm_vt_switch {
struct list_head head;
struct device *dev;
bool required;
};

static LIST_HEAD(pm_vt_switch_list);


/**
* pm_vt_switch_required - indicate VT switch at suspend requirements
* @dev: device
* @required: if true, caller needs VT switch at suspend/resume time
*
* The different console drivers may or may not require VT switches across
* suspend/resume, depending on how they handle restoring video state and
* what may be running.
*
* Drivers can indicate support for switchless suspend/resume, which can
* save time and flicker, by using this routine and passing 'false' as
* the argument. If any loaded driver needs VT switching, or the
* no_console_suspend argument has been passed on the command line, VT
* switches will occur.
*/
void pm_vt_switch_required(struct device *dev, bool required)
{
struct pm_vt_switch *entry, *tmp;

mutex_lock(&vt_switch_mutex);
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
if (tmp->dev == dev) {
/* already registered, update requirement */
tmp->required = required;
goto out;
}
}

entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
goto out;

entry->required = required;
entry->dev = dev;

list_add(&entry->head, &pm_vt_switch_list);
out:
mutex_unlock(&vt_switch_mutex);
}
EXPORT_SYMBOL(pm_vt_switch_required);

/**
* pm_vt_switch_unregister - stop tracking a device's VT switching needs
* @dev: device
*
* Remove @dev from the vt switch list.
*/
void pm_vt_switch_unregister(struct device *dev)
{
struct pm_vt_switch *tmp;

mutex_lock(&vt_switch_mutex);
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
if (tmp->dev == dev) {
list_del(&tmp->head);
break;
}
}
mutex_unlock(&vt_switch_mutex);
}
EXPORT_SYMBOL(pm_vt_switch_unregister);

/*
* There are three cases when a VT switch on suspend/resume are required:
* 1) no driver has indicated a requirement one way or another, so preserve
* the old behavior
* 2) console suspend is disabled, we want to see debug messages across
* suspend/resume
* 3) any registered driver indicates it needs a VT switch
*
* If none of these conditions is present, meaning we have at least one driver
* that doesn't need the switch, and none that do, we can avoid it to make
* resume look a little prettier (and suspend too, but that's usually hidden,
* e.g. when closing the lid on a laptop).
*/
static bool pm_vt_switch(void)
{
struct pm_vt_switch *entry;
bool ret = true;

mutex_lock(&vt_switch_mutex);
if (list_empty(&pm_vt_switch_list))
goto out;

if (!console_suspend_enabled)
goto out;

list_for_each_entry(entry, &pm_vt_switch_list, head) {
if (entry->required)
goto out;
}

ret = false;
out:
mutex_unlock(&vt_switch_mutex);
return ret;
}

int pm_prepare_console(void)
{
if (!pm_vt_switch())
return 0;

orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1);
if (orig_fgconsole < 0)
return 1;
@@ -26,6 +139,9 @@ int pm_prepare_console(void)

void pm_restore_console(void)
{
if (!pm_vt_switch())
return;

if (orig_fgconsole >= 0) {
vt_move_to_console(orig_fgconsole, 0);
vt_kmsg_redirect(orig_kmsg);
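The comment block above spells out the three cases in which the new pm_vt_switch() helper still asks for a VT switch on suspend/resume. As a worked illustration of that decision table, here is a self-contained userspace model in plain C; the hard-coded driver list and all names are made up, only the three-case logic mirrors the helper.

#include <stdbool.h>
#include <stdio.h>

/* Standalone model of the decision: switch VTs unless at least one driver
 * registered, console suspend is enabled, and no registered driver asked
 * for a switch. */
struct entry { const char *name; bool required; };

static bool need_vt_switch(const struct entry *list, int n,
			   bool console_suspend_enabled)
{
	int i;

	if (n == 0)			/* case 1: nobody registered */
		return true;
	if (!console_suspend_enabled)	/* case 2: debugging over suspend */
		return true;
	for (i = 0; i < n; i++)		/* case 3: someone needs the switch */
		if (list[i].required)
			return true;
	return false;
}

int main(void)
{
	const struct entry drivers[] = {
		{ "fb0", false },
		{ "fb1", false },
	};

	printf("VT switch needed: %s\n",
	       need_vt_switch(drivers, 2, true) ? "yes" : "no");
	return 0;
}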