Merge tag 'drm-intel-next-2012-02-16-merge-resolved' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next

* tag 'drm-intel-next-2012-02-16-merge-resolved' of git://people.freedesktop.org/~danvet/drm-intel: (45 commits)
  Revert "drivers/gpu/drm/i915/intel_overlay.c needs seq_file.h"
  drm/i915/lvds: Always use the presence pin for LVDS on PCH
  drm/i915: Record the position of the request upon error
  drm/i915: Record the in-flight requests at the time of a hang
  drm/i915: Record the tail at each request and use it to estimate the head
  drm/i915: add missing SDVO bits for interlaced modes on ILK
  drm/i915: Fix race condition in accessing GMBUS
  drm/i915: add a "force-dvi" HDMI audio mode
  drm/i915: Don't lock panel registers when downclocking
  drm/i915: fix up locking inconsistency around gem_do_init
  drm/i915: enable forcewake voodoo also for gen6
  drm/i915: fixup seqno allocation logic for lazy_request
  drm/i915: outstanding_lazy_request is a u32
  drm/i915: check gtfifodbg after possibly failed writes
  drm/i915: catch gtfifo errors on forcewake_put
  drm/i915: use gtfifodbg
  drm/i915: set interlaced bits for TRANSCONF
  drm/i915: fixup overlay checks for interlaced modes
  drm/i915: allow interlaced mode output on the HDMI connector
  drm/i915: allow interlaced mode output on the SDVO connector
  ...
Dave Airlie 2012-02-23 14:11:53 +00:00
commit e5bcf23443
28 changed files with 1044 additions and 230 deletions

View File

@ -76,7 +76,6 @@ static struct _intel_private {
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
dma_addr_t scratch_page_dma;
} intel_private;
#define INTEL_GTT_GEN intel_private.driver->gen
@ -306,9 +305,9 @@ static int intel_gtt_setup_scratch_page(void)
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
return -EINVAL;
intel_private.scratch_page_dma = dma_addr;
intel_private.base.scratch_page_dma = dma_addr;
} else
intel_private.scratch_page_dma = page_to_phys(page);
intel_private.base.scratch_page_dma = page_to_phys(page);
intel_private.scratch_page = page;
@ -631,7 +630,7 @@ static unsigned int intel_gtt_mappable_entries(void)
static void intel_gtt_teardown_scratch_page(void)
{
set_pages_wb(intel_private.scratch_page, 1);
pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(intel_private.scratch_page);
__free_page(intel_private.scratch_page);
@ -681,6 +680,7 @@ static int intel_gtt_init(void)
iounmap(intel_private.registers);
return -ENOMEM;
}
intel_private.base.gtt = intel_private.gtt;
global_cache_flush(); /* FIXME: ? */
@ -975,7 +975,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
unsigned int i;
for (i = first_entry; i < (first_entry + num_entries); i++) {
intel_private.driver->write_entry(intel_private.scratch_page_dma,
intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);

View File

@ -315,7 +315,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
if (err)
return err;
if (__get_user(c32.auth, &client->auth)
if (__get_user(c32.idx, &client->idx)
|| __get_user(c32.auth, &client->auth)
|| __get_user(c32.pid, &client->pid)
|| __get_user(c32.uid, &client->uid)
|| __get_user(c32.magic, &client->magic)

View File

@ -721,8 +721,14 @@ static void i915_ring_error_state(struct seq_file *m,
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
seq_printf(m, " SYNC_0: 0x%08x\n",
error->semaphore_mboxes[ring][0]);
seq_printf(m, " SYNC_1: 0x%08x\n",
error->semaphore_mboxes[ring][1]);
}
seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
static int i915_error_state(struct seq_file *m, void *unused)
@ -732,7 +738,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
int i, page, offset, elt;
int i, j, page, offset, elt;
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (!dev_priv->first_error) {
@ -772,10 +778,10 @@ static int i915_error_state(struct seq_file *m, void *unused)
error->pinned_bo,
error->pinned_bo_count);
for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
if (error->batchbuffer[i]) {
struct drm_i915_error_object *obj = error->batchbuffer[i];
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
if ((obj = error->ring[i].batchbuffer)) {
seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
@ -787,11 +793,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
}
}
for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
if (error->ringbuffer[i]) {
struct drm_i915_error_object *obj = error->ringbuffer[i];
if (error->ring[i].num_requests) {
seq_printf(m, "%s --- %d requests\n",
dev_priv->ring[i].name,
error->ring[i].num_requests);
for (j = 0; j < error->ring[i].num_requests; j++) {
seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
error->ring[i].requests[j].seqno,
error->ring[i].requests[j].jiffies,
error->ring[i].requests[j].tail);
}
}
if ((obj = error->ring[i].ringbuffer)) {
seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
@ -1431,12 +1446,62 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
I915_READ16(C0DRB3));
seq_printf(m, "C1DRB3 = 0x%04x\n",
I915_READ16(C1DRB3));
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
I915_READ(MAD_DIMM_C0));
seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
I915_READ(MAD_DIMM_C1));
seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
I915_READ(MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
I915_READ(TILECTL));
seq_printf(m, "ARB_MODE = 0x%08x\n",
I915_READ(ARB_MODE));
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
I915_READ(DISP_ARB_CTL));
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
if (INTEL_INFO(dev)->gen == 6)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
for (i = 0; i < I915_NUM_RINGS; i++) {
ring = &dev_priv->ring[i];
seq_printf(m, "%s\n", ring->name);
if (INTEL_INFO(dev)->gen == 7)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
}
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
seq_printf(m, "aliasing PPGTT:\n");
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
}
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
mutex_unlock(&dev->struct_mutex);
return 0;
}
static int
i915_debugfs_common_open(struct inode *inode,
struct file *filp)
@ -1778,6 +1843,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_context_status", i915_context_status, 0},
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

View File

@ -1196,22 +1196,39 @@ static int i915_load_gem_init(struct drm_device *dev)
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
/* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently
* prefetches past the end of the object, and we've seen multiple
* hangs with the GPU head pointer stuck in a batchbuffer bound
* at the last page of the aperture. One page should be enough to
* keep any prefetching inside of the aperture.
*/
i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
mutex_lock(&dev->struct_mutex);
ret = i915_gem_init_ringbuffer(dev);
if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
/* For paranoia keep the guard page in between. */
gtt_size -= PAGE_SIZE;
i915_gem_do_init(dev, 0, mappable_size, gtt_size);
ret = i915_gem_init_aliasing_ppgtt(dev);
if (ret)
return ret;
} else {
/* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch
* page. There are a number of places where the hardware
* apparently prefetches past the end of the object, and we've
* seen multiple hangs with the GPU head pointer stuck in a
* batchbuffer bound at the last page of the aperture. One page
* should be enough to keep any prefetching inside of the
* aperture.
*/
i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
}
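For scale, with the I915_PPGTT_PD_ENTRIES value introduced in this series (512) and 4 KiB pages, the space trimmed off the top of the global GTT works out as below; a back-of-the-envelope sketch, not part of the patch:

static unsigned long ppgtt_reserved_bytes(void)
{
	/* 512 PDEs, each naming one 4 KiB page-table page: 2 MiB, */
	/* plus the 4 KiB guard page kept for the prefetch issue.  */
	return I915_PPGTT_PD_ENTRIES * PAGE_SIZE + PAGE_SIZE;
}

With PAGE_SIZE == 4096 that is 2 MiB + 4 KiB subtracted from gtt_size before i915_gem_do_init() runs.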
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret)
if (ret) {
i915_gem_cleanup_aliasing_ppgtt(dev);
return ret;
}
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
@ -1298,6 +1315,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
@ -2187,6 +2205,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);

View File

@ -103,6 +103,11 @@ MODULE_PARM_DESC(enable_hangcheck,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
bool i915_enable_ppgtt __read_mostly = 1;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
static struct drm_driver driver;
extern int intel_agp_enabled;
@ -380,16 +385,27 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
u32 gtfifodbg;
gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
"MMIO read or write has been dropped %x\n", gtfifodbg))
I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
POSTING_READ(FORCEWAKE_MT);
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
/*
@ -405,8 +421,10 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
@ -414,10 +432,13 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
udelay(10);
fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
}
WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
dev_priv->gt_fifo_count = fifo;
}
dev_priv->gt_fifo_count--;
return ret;
}
static int i915_drm_freeze(struct drm_device *dev)
@ -498,7 +519,7 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
error = i915_gem_init_ringbuffer(dev);
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (HAS_PCH_SPLIT(dev))
@ -713,12 +734,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
!dev_priv->mm.suspended) {
dev_priv->mm.suspended = 0;
i915_gem_init_swizzling(dev);
dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
if (HAS_BSD(dev))
dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
if (HAS_BLT(dev))
dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
i915_gem_init_ppgtt(dev);
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_mode_config_reset(dev);
@ -981,11 +1006,15 @@ __i915_read(64, q)
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__gen6_gt_wait_for_fifo(dev_priv); \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
write##y(val, dev_priv->regs + reg); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
}
__i915_write(8, b)
__i915_write(16, w)
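For the 32-bit case the macro above expands to roughly the following; an illustrative expansion, not literal preprocessor output:

void i915_write32(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
	u32 __fifo_ret = 0;
	trace_i915_reg_rw(true, reg, val, sizeof(val));
	if (NEEDS_FORCE_WAKE((dev_priv), (reg)))
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);
	writel(val, dev_priv->regs + reg);
	if (unlikely(__fifo_ret))
		gen6_gt_check_fifodbg(dev_priv); /* only after a FIFO stall */
}

The fifodbg check runs only when __gen6_gt_wait_for_fifo() reported a stuck FIFO, so the common write path pays just one extra branch.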

View File

@ -159,6 +159,10 @@ struct drm_i915_error_state {
u32 ipehr[I915_NUM_RINGS];
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
u32 instpm[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];
@ -170,11 +174,19 @@ struct drm_i915_error_state {
u32 faddr[I915_NUM_RINGS];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
u32 *pages[0];
} *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
struct drm_i915_error_ring {
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
u32 *pages[0];
} *ringbuffer, *batchbuffer;
struct drm_i915_error_request {
long jiffies;
u32 seqno;
u32 tail;
} *requests;
int num_requests;
} ring[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
u32 name;
@ -254,6 +266,16 @@ struct intel_device_info {
u8 has_llc:1;
};
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
};
enum no_fbc_reason {
FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
@ -299,6 +321,10 @@ typedef struct drm_i915_private {
u32 reg0;
} *gmbus;
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
@ -580,6 +606,9 @@ typedef struct drm_i915_private {
struct io_mapping *gtt_mapping;
int gtt_mtrr;
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
struct shrinker inactive_shrinker;
/**
@ -745,6 +774,13 @@ typedef struct drm_i915_private {
struct drm_property *force_audio_property;
} drm_i915_private_t;
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
HDMI_AUDIO_OFF, /* force turn off HDMI audio */
HDMI_AUDIO_AUTO, /* trust EDID */
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
enum i915_cache_level {
I915_CACHE_NONE,
I915_CACHE_LLC,
@ -837,6 +873,8 @@ struct drm_i915_gem_object {
unsigned int cache_level:2;
unsigned int has_aliasing_ppgtt_mapping:1;
struct page **pages;
/**
@ -914,6 +952,9 @@ struct drm_i915_gem_request {
/** GEM sequence number associated with this request. */
uint32_t seqno;
/** Position in the ringbuffer of the end of the request */
u32 tail;
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
@ -973,6 +1014,8 @@ struct drm_i915_file_private {
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6)
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@ -1015,6 +1058,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern bool i915_enable_ppgtt __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@ -1155,12 +1199,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
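The signed cast is what lets this comparison survive 32-bit seqno wraparound; a standalone userspace sketch (not kernel code) of the same trick:

#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* signed difference: small forward distances stay positive
	 * even when the counter has wrapped through 0 */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(5, 3));           /* plain case */
	assert(!seqno_passed(3, 5));
	assert(seqno_passed(2, 0xfffffffeu)); /* wrapped: 2 is later */
	return 0;
}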
static inline u32
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
return ring->outstanding_lazy_request = dev_priv->next_seqno;
}
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
@ -1185,13 +1224,17 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
}
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
unsigned long start,
@ -1231,6 +1274,14 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
@ -1369,7 +1420,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \

View File

@ -1576,6 +1576,28 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
}
}
static u32
i915_gem_get_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno = dev_priv->next_seqno;
/* reserve 0 for non-seqno */
if (++dev_priv->next_seqno == 0)
dev_priv->next_seqno = 1;
return seqno;
}
u32
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
if (ring->outstanding_lazy_request == 0)
ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
return ring->outstanding_lazy_request;
}
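The intended call pattern, sketched under the assumption that i915_add_request() (below) is the only place that clears outstanding_lazy_request:

/* Minimal sketch: repeated calls within one request window return
 * the same lazily allocated seqno; emitting the request resets it. */
static void lazy_seqno_example(struct intel_ring_buffer *ring,
			       struct drm_i915_gem_request *rq)
{
	u32 a = i915_gem_next_request_seqno(ring); /* allocates, say 42 */
	u32 b = i915_gem_next_request_seqno(ring); /* still 42 */
	WARN_ON(a != b);
	i915_add_request(ring, NULL, rq); /* emits 42, then sets
					   * outstanding_lazy_request = 0 */
}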
int
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
@ -1583,10 +1605,19 @@ i915_add_request(struct intel_ring_buffer *ring,
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
u32 request_ring_position;
int was_empty;
int ret;
BUG_ON(request == NULL);
seqno = i915_gem_next_request_seqno(ring);
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
* position of the head.
*/
request_ring_position = intel_ring_get_tail(ring);
ret = ring->add_request(ring, &seqno);
if (ret)
@ -1596,6 +1627,7 @@ i915_add_request(struct intel_ring_buffer *ring,
request->seqno = seqno;
request->ring = ring;
request->tail = request_ring_position;
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
@ -1610,7 +1642,7 @@ i915_add_request(struct intel_ring_buffer *ring,
spin_unlock(&file_priv->mm.lock);
}
ring->outstanding_lazy_request = false;
ring->outstanding_lazy_request = 0;
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
@ -1732,7 +1764,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
*/
static void
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
@ -1760,6 +1792,12 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
break;
trace_i915_gem_request_retire(ring, request->seqno);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of the tail of the request to update the last known position
* of the GPU head.
*/
ring->last_retired_head = request->tail;
list_del(&request->list);
i915_gem_request_remove_from_client(request);
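The consumer side lives in the ring code; a hedged sketch of how a ring-space wait can use the recorded tail instead of an MMIO head read, assuming a ring_space() helper that computes head minus tail modulo the ring size:

/* Sketch: prefer the software-tracked head over reading HEAD from
 * hardware when a request has been retired since the last check. */
if (ring->last_retired_head != -1) {
	ring->head = ring->last_retired_head;
	ring->last_retired_head = -1;
	ring->space = ring_space(ring);
}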
@ -2020,6 +2058,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret = 0;
if (obj->gtt_space == NULL)
@ -2064,6 +2103,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
trace_i915_gem_object_unbind(obj);
i915_gem_gtt_unbind_object(obj);
if (obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
obj->has_aliasing_ppgtt_mapping = 0;
}
i915_gem_object_put_pages_gtt(obj);
list_del_init(&obj->gtt_list);
@ -2882,6 +2926,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
if (obj->cache_level == cache_level)
@ -2910,6 +2956,9 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
i915_gem_gtt_rebind_object(obj, cache_level);
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
}
if (cache_level == I915_CACHE_NONE) {
@ -3681,12 +3730,71 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
void i915_gem_init_swizzling(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
if (IS_GEN5(dev))
return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
else
I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
void i915_gem_init_ppgtt(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
int i;
if (!dev_priv->mm.aliasing_ppgtt)
return;
pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines */
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
for (i = 0; i < I915_NUM_RINGS; i++) {
ring = &dev_priv->ring[i];
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
int
i915_gem_init_ringbuffer(struct drm_device *dev)
i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
i915_gem_init_swizzling(dev);
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@ -3705,6 +3813,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
dev_priv->next_seqno = 1;
i915_gem_init_ppgtt(dev);
return 0;
cleanup_bsd_ring:
@ -3742,7 +3852,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
ret = i915_gem_init_ringbuffer(dev);
ret = i915_gem_init_hw(dev);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;

View File

@ -287,14 +287,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
DRM_ERROR("No GTT space found for object %d\n",
DRM_DEBUG("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
DRM_ERROR("reloc with multiple write domains: "
DRM_DEBUG("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
@ -305,7 +305,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
DRM_ERROR("reloc with read/write non-GPU domains: "
DRM_DEBUG("reloc with read/write non-GPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
@ -316,7 +316,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain)) {
DRM_ERROR("Write domain conflict: "
DRM_DEBUG("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
obj, reloc->target_handle,
@ -337,7 +337,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
DRM_ERROR("Relocation beyond object bounds: "
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->offset,
@ -345,7 +345,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
if (unlikely(reloc->offset & 3)) {
DRM_ERROR("Relocation not 4-byte aligned: "
DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
@ -515,6 +515,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
int ret, retry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
@ -623,6 +624,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
}
i915_gem_object_unpin(obj);
/* ... and ensure ppgtt mapping exist if needed. */
if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, obj->cache_level);
obj->has_aliasing_ppgtt_mapping = 1;
}
}
if (ret != -ENOSPC || retry > 1)
@ -724,7 +733,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
@ -1055,7 +1064,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
DRM_ERROR("execbuf with invalid offset/length\n");
DRM_DEBUG("execbuf with invalid offset/length\n");
return -EINVAL;
}
@ -1070,20 +1079,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
DRM_ERROR("execbuf with invalid ring (BSD)\n");
DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
DRM_ERROR("execbuf with invalid ring (BLT)\n");
DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS];
break;
default:
DRM_ERROR("execbuf with unknown ring: %d\n",
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
@ -1109,18 +1118,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
break;
default:
DRM_ERROR("execbuf with unknown constants: %d\n", mode);
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_ERROR("clip rectangles are only valid with the render ring\n");
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
@ -1165,7 +1174,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
@ -1173,7 +1182,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (!list_empty(&obj->exec_list)) {
DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
@ -1211,7 +1220,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Set the pending read domains for the batch buffer to COMMAND */
if (batch_obj->base.pending_write_domain) {
DRM_ERROR("Attempting to use self-modifying batch buffer\n");
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
@ -1316,7 +1325,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, i;
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
@ -1324,7 +1333,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
DRM_ERROR("Failed to allocate exec list for %d buffers\n",
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
@ -1335,7 +1344,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_ERROR("copy %d exec entries failed %d\n",
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
@ -1376,7 +1385,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_ERROR("failed to copy %d exec entries "
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
@ -1396,7 +1405,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
int ret;
if (args->buffer_count < 1) {
DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
@ -1406,7 +1415,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
DRM_ERROR("Failed to allocate exec list for %d buffers\n",
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
@ -1415,7 +1424,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_ERROR("copy %d exec entries failed %d\n",
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
@ -1430,7 +1439,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_ERROR("failed to copy %d exec entries "
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}

View File

@ -29,6 +29,279 @@
#include "i915_trace.h"
#include "intel_drv.h"
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
uint32_t *pt_vaddr;
uint32_t scratch_pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
while (num_entries) {
last_pte = first_pte + num_entries;
if (last_pte > I915_PPGTT_PT_ENTRIES)
last_pte = I915_PPGTT_PT_ENTRIES;
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
kunmap_atomic(pt_vaddr);
num_entries -= last_pte - first_pte;
first_pte = 0;
act_pd++;
}
}
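A worked instance of the index arithmetic above, assuming I915_PPGTT_PT_ENTRIES == 1024:

/* GTT page 1500 lands in page table 1, slot 476: */
unsigned act_pd    = 1500 / I915_PPGTT_PT_ENTRIES; /* == 1 */
unsigned first_pte = 1500 % I915_PPGTT_PT_ENTRIES; /* == 476 */

Clearing 700 entries from there fills slots 476..1023 of table 1 (548 entries), then wraps to slots 0..151 of table 2.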
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt;
uint32_t pd_entry;
unsigned first_pd_entry_in_global_pt;
uint32_t __iomem *pd_addr;
int i;
int ret = -ENOMEM;
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return ret;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
goto err_ppgtt;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
if (!ppgtt->pt_pages[i])
goto err_pt_alloc;
}
if (dev_priv->mm.gtt->needs_dmar) {
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
}
pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
if (dev_priv->mm.gtt->needs_dmar) {
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
0, 4096,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev,
pt_addr)) {
ret = -EIO;
goto err_pd_pin;
}
ppgtt->pt_dma_addr[i] = pt_addr;
} else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
dev_priv->mm.aliasing_ppgtt = ppgtt;
return 0;
err_pd_pin:
if (ppgtt->pt_dma_addr) {
for (i--; i >= 0; i--)
pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
err_pt_alloc:
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
if (ppgtt->pt_pages[i])
__free_page(ppgtt->pt_pages[i]);
}
kfree(ppgtt->pt_pages);
err_ppgtt:
kfree(ppgtt);
return ret;
}
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
if (!ppgtt)
return;
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
__free_page(ppgtt->pt_pages[i]);
kfree(ppgtt->pt_pages);
kfree(ppgtt);
}
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
struct scatterlist *sg_list,
unsigned sg_len,
unsigned first_entry,
uint32_t pte_flags)
{
uint32_t *pt_vaddr, pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j, m, segment_len;
dma_addr_t page_addr;
struct scatterlist *sg;
/* init sg walking */
sg = sg_list;
i = 0;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
while (i < sg_len) {
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
pte = GEN6_PTE_ADDR_ENCODE(page_addr);
pt_vaddr[j] = pte | pte_flags;
/* grab the next page */
m++;
if (m == segment_len) {
sg = sg_next(sg);
i++;
if (i == sg_len)
break;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
}
}
kunmap_atomic(pt_vaddr);
first_pte = 0;
act_pd++;
}
}
static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry, unsigned num_entries,
struct page **pages, uint32_t pte_flags)
{
uint32_t *pt_vaddr, pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
dma_addr_t page_addr;
while (num_entries) {
last_pte = first_pte + num_entries;
last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
for (i = first_pte; i < last_pte; i++) {
page_addr = page_to_phys(*pages);
pte = GEN6_PTE_ADDR_ENCODE(page_addr);
pt_vaddr[i] = pte | pte_flags;
pages++;
}
kunmap_atomic(pt_vaddr);
num_entries -= last_pte - first_pte;
first_pte = 0;
act_pd++;
}
}
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t pte_flags = GEN6_PTE_VALID;
switch (cache_level) {
case I915_CACHE_LLC_MLC:
pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
pte_flags |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
}
if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
i915_ppgtt_insert_sg_entries(ppgtt,
obj->sg_list,
obj->num_sg,
obj->gtt_space->start >> PAGE_SHIFT,
pte_flags);
} else
i915_ppgtt_insert_pages(ppgtt,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
pte_flags);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
i915_ppgtt_clear_range(ppgtt,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
}
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)

View File

@ -93,8 +93,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_INFO(dev)->gen >= 6) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
uint32_t dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
/* Enable swizzling when the channels are populated with
* identically sized dimms. We don't need to check the 3rd
* channel because no cpu with gpu attached ships in that
* configuration. Also, swizzling only makes sense for 2
* channels anyway. */
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else if (IS_GEN5(dev)) {
/* On Ironlake, whatever the DRAM config, the GPU always does the
* same swizzling setup.
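A standalone illustration (not kernel code) of the gen6 channel comparison above, with hypothetical register values for one 4 GiB DIMM per channel (4096 MB / 256 MB = 16):

#include <stdint.h>
#include <stdio.h>

#define MAD_DIMM_A_SIZE_MASK (0xffu << 0)
#define MAD_DIMM_B_SIZE_MASK (0xffu << 8)

int main(void)
{
	uint32_t dimm_c0 = 16, dimm_c1 = 16; /* hypothetical MCH reads */
	dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
	dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
	printf("bit-6 swizzling %s\n", dimm_c0 == dimm_c1 ? "on" : "off");
	return 0;
}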

View File

@ -788,11 +788,11 @@ i915_error_state_free(struct drm_device *dev,
{
int i;
for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
i915_error_object_free(error->batchbuffer[i]);
for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
i915_error_object_free(error->ringbuffer[i]);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
kfree(error->ring[i].requests);
}
kfree(error->active_bo);
kfree(error->overlay);
@ -903,6 +903,10 @@ static void i915_record_ring_state(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 6) {
error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
}
if (INTEL_INFO(dev)->gen >= 4) {
@ -925,6 +929,55 @@ static void i915_record_ring_state(struct drm_device *dev,
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *request;
int i, count;
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
if (ring->obj == NULL)
continue;
i915_record_ring_state(dev, error, ring);
error->ring[i].batchbuffer =
i915_error_first_batchbuffer(dev_priv, ring);
error->ring[i].ringbuffer =
i915_error_object_create(dev_priv, ring->obj);
count = 0;
list_for_each_entry(request, &ring->request_list, list)
count++;
error->ring[i].num_requests = count;
error->ring[i].requests =
kmalloc(count*sizeof(struct drm_i915_error_request),
GFP_ATOMIC);
if (error->ring[i].requests == NULL) {
error->ring[i].num_requests = 0;
continue;
}
count = 0;
list_for_each_entry(request, &ring->request_list, list) {
struct drm_i915_error_request *erq;
erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies;
erq->tail = request->tail;
}
}
}
/**
@ -970,24 +1023,8 @@ static void i915_capture_error_state(struct drm_device *dev)
error->done_reg = I915_READ(DONE_REG);
}
i915_record_ring_state(dev, error, &dev_priv->ring[RCS]);
if (HAS_BLT(dev))
i915_record_ring_state(dev, error, &dev_priv->ring[BCS]);
if (HAS_BSD(dev))
i915_record_ring_state(dev, error, &dev_priv->ring[VCS]);
i915_gem_record_fences(dev, error);
/* Record the active batch and ring buffers */
for (i = 0; i < I915_NUM_RINGS; i++) {
error->batchbuffer[i] =
i915_error_first_batchbuffer(dev_priv,
&dev_priv->ring[i]);
error->ringbuffer[i] =
i915_error_object_create(dev_priv,
dev_priv->ring[i].obj);
}
i915_gem_record_rings(dev, error);
/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
@ -1778,18 +1815,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xeffe);
if (IS_GEN6(dev)) {
/* Workaround stalls observed on Sandy Bridge GPUs by
* making the blitter command streamer generate a
* write to the Hardware Status Page for
* MI_USER_INTERRUPT. This appears to serialize the
* previous seqno write out before the interrupt
* happens.
*/
I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
}
/* XXX hotplug from PCH */
I915_WRITE(DEIMR, 0xffffffff);

View File

@ -86,12 +86,45 @@
#define GEN6_MBC_SNPCR_LOW (2<<21)
#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
#define GEN6_MBCTL 0x0907c
#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
#define GEN6_GDRST 0x941c
#define GEN6_GRDOM_FULL (1 << 0)
#define GEN6_GRDOM_RENDER (1 << 1)
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PDE_VALID (1 << 0)
#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_CACHE_BITS (3 << 1)
#define GEN6_PTE_GFDT (1 << 3)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
#define PP_DIR_DCLV_2G 0xffffffff
#define GAM_ECOCHK 0x4090
#define ECOCHK_SNB_BIT (1<<10)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
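A worked expansion of the address encode (standalone sketch, not kernel code): physical address bits 39:32 are folded into PTE bits 11:4, which are otherwise unused for a page-aligned address:

#include <stdint.h>
#include <stdio.h>

#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))

int main(void)
{
	uint64_t addr = 0x1234567000ULL; /* page-aligned 40-bit address */
	/* bits 39:32 are 0x12; they land at bits 11:4, i.e. 0x120 */
	printf("pte = 0x%llx\n",
	       (unsigned long long)GEN6_GTT_ADDR_ENCODE(addr));
	return 0; /* prints pte = 0x1234567120 */
}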
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@ -295,6 +328,12 @@
#define FENCE_REG_SANDYBRIDGE_0 0x100000
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
/* control register for cpu gtt access */
#define TILECTL 0x101000
#define TILECTL_SWZCTL (1 << 0)
#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
#define TILECTL_BACKSNOOP_DIS (1 << 3)
/*
* Instruction and interrupt control regs
*/
@ -318,6 +357,11 @@
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define DONE_REG 0x40b0
@ -395,6 +439,7 @@
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
@ -1037,6 +1082,29 @@
#define C0DRB3 0x10206
#define C1DRB3 0x10606
/** snb MCH registers for reading the DRAM channel configuration */
#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
#define MAD_DIMM_ECC_MASK (0x3 << 24)
#define MAD_DIMM_ECC_OFF (0x0 << 24)
#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
#define MAD_DIMM_ECC_ON (0x3 << 24)
#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
#define MAD_DIMM_A_SELECT (0x1 << 16)
/* DIMM sizes are in multiples of 256mb. */
#define MAD_DIMM_B_SIZE_SHIFT 8
#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
#define MAD_DIMM_A_SIZE_SHIFT 0
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
/* Clocking configuration register */
#define CLKCFG 0x10c00
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@ -1316,6 +1384,7 @@
#define _VSYNC_A 0x60014
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
/* Pipe B timing regs */
#define _HTOTAL_B 0x61000
@ -1326,6 +1395,8 @@
#define _VSYNC_B 0x61014
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
#define _VSYNCSHIFT_B 0x61028
#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
@ -1334,6 +1405,7 @@
#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
/* VGA port control */
#define ADPA 0x61100
@ -2319,10 +2391,21 @@
#define PIPECONF_PALETTE 0
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
#define PIPECONF_INTERLACE_MASK (7 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
* fitting must be disabled on pre-ilk for interlaced. */
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */
#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */
/* Ironlake and later have a complete new set of values for interlaced. PFIT
* means panel fitter required, PF means progressive fetch, DBL means power
* saving pixel doubling. */
#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21)
#define PIPECONF_INTERLACED_ILK (3 << 21)
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
@ -3205,6 +3288,7 @@
#define _TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
#define _TRANS_VSYNCSHIFT_A 0xe0028
#define _TRANSA_DATA_M1 0xe0030
#define _TRANSA_DATA_N1 0xe0034
@ -3235,6 +3319,7 @@
#define _TRANS_VTOTAL_B 0xe100c
#define _TRANS_VBLANK_B 0xe1010
#define _TRANS_VSYNC_B 0xe1014
#define _TRANS_VSYNCSHIFT_B 0xe1028
#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
@ -3242,6 +3327,8 @@
#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
_TRANS_VSYNCSHIFT_B)
#define _TRANSB_DATA_M1 0xe1030
#define _TRANSB_DATA_N1 0xe1034
@ -3275,7 +3362,10 @@
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_VIDEO_AUDIO (0<<26)
#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
#define TRANS_INTERLACED (3<<21)
#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
#define TRANS_8BPC (0<<5)
#define TRANS_10BPC (1<<5)
#define TRANS_6BPC (2<<5)
@ -3614,6 +3704,12 @@
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
#define GTFIFODBG 0x120000
#define GT_FIFO_CPU_ERROR_MASK 7
#define GT_FIFO_OVFERR (1<<2)
#define GT_FIFO_IAWRERR (1<<1)
#define GT_FIFO_IARDERR (1<<0)
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20

View File

@ -594,7 +594,10 @@ void intel_crt_init(struct drm_device *dev)
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
crt->base.crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = 1;
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);

View File

@ -1266,7 +1266,8 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
u32 val, pipeconf_val;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@ -1280,6 +1281,7 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv->dev)) {
/*
@ -1287,8 +1289,19 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
* that in pipeconf reg.
*/
val &= ~PIPE_BPC_MASK;
val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
val |= pipeconf_val & PIPE_BPC_MASK;
}
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv->dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
else
val |= TRANS_PROGRESSIVE;
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
@ -1901,7 +1914,7 @@ static void intel_update_fbc(struct drm_device *dev)
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
if (INTEL_INFO(dev)->gen <= 5)
if (INTEL_INFO(dev)->gen <= 6)
enable_fbc = 0;
}
if (!enable_fbc) {
@ -2973,6 +2986,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
intel_fdi_normal_train(crtc);
@ -3437,11 +3451,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return false;
}
/* XXX some encoders set the crtcinfo, others don't.
* Obviously we need some form of conflict resolution here...
*/
if (adjusted_mode->crtc_htotal == 0)
drm_mode_set_crtcinfo(adjusted_mode, 0);
/* All interlaced capable intel hw wants timings in frames. */
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
@ -5106,7 +5117,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll, dspcntr, pipeconf;
u32 dpll, dspcntr, pipeconf, vsyncshift;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
@ -5387,17 +5398,22 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
}
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (!IS_GEN2(dev) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
vsyncshift = adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2;
} else {
pipeconf |= PIPECONF_PROGRESSIVE;
vsyncshift = 0;
}
if (!IS_GEN3(dev))
I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
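A worked number for the interlaced branch above, using hypothetical CEA 1080i horizontal timings: with crtc_hsync_start 2008 and crtc_htotal 2200, the second field's vsync is shifted by 2008 - 2200/2 = 908 pixels. Sketch:

/* illustrative only; real values come from adjusted_mode */
int crtc_hsync_start = 2008, crtc_htotal = 2200;
int vsyncshift = crtc_hsync_start - crtc_htotal / 2; /* == 908 */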
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@ -5979,17 +5995,19 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
pipeconf |= PIPECONF_INTERLACED_ILK;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
I915_WRITE(VSYNCSHIFT(pipe),
adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2);
} else {
pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(VSYNCSHIFT(pipe), 0);
}
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@ -6032,12 +6050,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_wait_for_vblank(dev, pipe);
if (IS_GEN5(dev)) {
/* enable address swizzle for tiling buffer */
temp = I915_READ(DISP_ARB_CTL);
I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
}
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
@ -6999,9 +7011,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
/* Unlock panel regs */
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
assert_panel_unlocked(dev_priv, pipe);
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
@@ -7010,9 +7020,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
/* Schedule downclock */
@@ -7042,9 +7049,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
DRM_DEBUG_DRIVER("downclocking LVDS\n");
/* Unlock panel regs */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
PANEL_UNLOCK_REGS);
assert_panel_unlocked(dev_priv, pipe);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
@@ -7052,9 +7057,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
}
@@ -7753,10 +7755,9 @@ static void intel_setup_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
bool has_lvds = false;
bool has_lvds;
if (IS_MOBILE(dev) && !IS_I830(dev))
has_lvds = intel_lvds_init(dev);
has_lvds = intel_lvds_init(dev);
if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
/* disable the panel fitter on everything but LVDS */
I915_WRITE(PFIT_CONTROL, 0);
@@ -8234,6 +8235,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
int cur_freq, min_freq, max_freq;
int i;
@@ -8245,6 +8247,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN6_RC_STATE, 0);
mutex_lock(&dev_priv->dev->struct_mutex);
/* Clear the DBG now so we don't confuse earlier errors */
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */


@@ -208,17 +208,8 @@ intel_dp_link_clock(uint8_t link_bw)
*/
static int
intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
intel_dp_link_required(int pixel_clock, int bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
if (check_bpp)
bpp = check_bpp;
else if (intel_crtc)
bpp = intel_crtc->bpp;
return (pixel_clock * bpp + 9) / 10;
}
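With bpp now an explicit argument, the link check reduces to comparing the mode's raw bit rate against the data rate left after 8b/10b coding. A self-contained sketch of that comparison (the function name is hypothetical; the common factor of 1/10 matches the helpers above):

/* A mode fits when pixel_clock * bpp does not exceed what the link
 * carries: symbol clock * lane count * 8 data bits per 10-bit symbol. */
static int dp_mode_fits(int pixel_clock_khz, int bpp,
                        int link_clock_khz, int lanes)
{
        int mode_rate = (pixel_clock_khz * bpp + 9) / 10;       /* round up */
        int max_rate = (link_clock_khz * lanes * 8) / 10;       /* 8b/10b */

        return mode_rate <= max_rate;
}

As the hunks below show, mode_valid first tries 24 bpp and only returns MODE_CLOCK_HIGH once the 18 bpp fallback also exceeds the link's capacity.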
@@ -245,12 +236,11 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
mode_rate = intel_dp_link_required(mode->clock, 24);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
if (mode_rate > max_rate) {
mode_rate = intel_dp_link_required(intel_dp,
mode->clock, 18);
mode_rate = intel_dp_link_required(mode->clock, 18);
if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
else
@@ -682,7 +672,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -700,7 +690,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (intel_dp_link_required(intel_dp, mode->clock, bpp)
if (intel_dp_link_required(mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;


@@ -157,7 +157,6 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
C(vsync_end);
C(vtotal);
C(clock);
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
#undef C
}


@@ -44,7 +44,7 @@ struct intel_hdmi {
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
int force_audio;
enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
};
@@ -339,7 +339,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
connector->display_info.raw_edid = NULL;
@@ -347,8 +349,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
}
if (status == connector_status_connected) {
if (intel_hdmi->force_audio)
intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON);
}
return status;
@@ -402,7 +405,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
return ret;
if (property == dev_priv->force_audio_property) {
int i = val;
enum hdmi_force_audio i = val;
bool has_audio;
if (i == intel_hdmi->force_audio)
@@ -410,13 +413,13 @@ intel_hdmi_set_property(struct drm_connector *connector,
intel_hdmi->force_audio = i;
if (i == 0)
if (i == HDMI_AUDIO_AUTO)
has_audio = intel_hdmi_detect_audio(connector);
else
has_audio = i > 0;
has_audio = (i == HDMI_AUDIO_ON);
if (has_audio == intel_hdmi->has_audio)
return 0;
if (i == HDMI_AUDIO_OFF_DVI)
intel_hdmi->has_hdmi_sink = 0;
intel_hdmi->has_audio = has_audio;
goto done;
@@ -514,7 +517,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_encoder->type = INTEL_OUTPUT_HDMI;
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 0;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
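Taken together, these hunks resolve the sink type and audio state from the force_audio property plus the EDID. A hedged sketch of that decision; the enum shape is assumed from the property table later in this diff, with "force-dvi" extending the original -1/0/1 value set downwards:

/* Assumed layout of the enum referenced above; only the distinctions
 * between the four states matter for the logic below. */
enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,        /* treat the sink as DVI */
        HDMI_AUDIO_OFF = -1,
        HDMI_AUDIO_AUTO = 0,            /* trust the EDID */
        HDMI_AUDIO_ON = 1,
};

/* Hypothetical helper condensing the detect/set_property logic. */
static void resolve_hdmi_audio(enum hdmi_force_audio force,
                               int edid_is_hdmi, int edid_has_audio,
                               int *has_hdmi_sink, int *has_audio)
{
        if (force != HDMI_AUDIO_OFF_DVI)
                *has_hdmi_sink = edid_is_hdmi;
        if (force == HDMI_AUDIO_AUTO)
                *has_audio = edid_has_audio;
        else
                *has_audio = (force == HDMI_AUDIO_ON);
}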


@@ -233,11 +233,15 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = adapter->algo_data;
int i, reg_offset;
int i, reg_offset, ret;
if (bus->force_bit)
return intel_i2c_quirk_xfer(dev_priv,
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
ret = intel_i2c_quirk_xfer(dev_priv,
bus->force_bit, msgs, num);
goto out;
}
reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
@@ -321,7 +325,8 @@ gmbus_xfer(struct i2c_adapter *adapter,
* start of the next xfer, till then let it sleep.
*/
I915_WRITE(GMBUS0 + reg_offset, 0);
return i;
ret = i;
goto out;
timeout:
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
@@ -331,9 +336,12 @@ gmbus_xfer(struct i2c_adapter *adapter,
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
if (!bus->force_bit)
return -ENOMEM;
return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
ret = -ENOMEM;
else
ret = intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
out:
mutex_unlock(&dev_priv->gmbus_mutex);
return ret;
}
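The race fix is structural as much as functional: every exit from gmbus_xfer, including the bit-banging fallback and the -ENOMEM error, now funnels through one unlock site. Reduced to a sketch (names hypothetical):

#include <linux/mutex.h>

/* The single-unlock pattern the hunks above impose on gmbus_xfer. */
static int serialized_xfer(struct mutex *lock, int use_fallback)
{
        int ret;

        mutex_lock(lock);
        if (use_fallback) {
                ret = 0;        /* was an early return before the fix */
                goto out;
        }
        ret = 1;                /* normal GMBUS transfer path */
out:
        mutex_unlock(lock);
        return ret;
}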
static u32 gmbus_func(struct i2c_adapter *adapter)
@@ -380,6 +388,8 @@ int intel_setup_gmbus(struct drm_device *dev)
if (dev_priv->gmbus == NULL)
return -ENOMEM;
mutex_init(&dev_priv->gmbus_mutex);
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];


@@ -692,6 +692,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "AOpen i45GMx-I",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
@@ -836,6 +844,18 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
return false;
}
static bool intel_lvds_supported(struct drm_device *dev)
{
/* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it. */
if (HAS_PCH_SPLIT(dev))
return true;
/* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm */
return IS_MOBILE(dev) && !IS_I830(dev);
}
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -857,6 +877,9 @@ bool intel_lvds_init(struct drm_device *dev)
int pipe;
u8 pin;
if (!intel_lvds_supported(dev))
return false;
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return false;


@@ -84,9 +84,10 @@ int intel_ddc_get_modes(struct drm_connector *connector,
}
static const struct drm_prop_enum_list force_audio_names[] = {
{ -1, "off" },
{ 0, "auto" },
{ 1, "on" },
{ HDMI_AUDIO_OFF_DVI, "force-dvi" },
{ HDMI_AUDIO_OFF, "off" },
{ HDMI_AUDIO_AUTO, "auto" },
{ HDMI_AUDIO_ON, "on" },
};
void


@@ -25,8 +25,6 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -264,7 +262,7 @@ i830_activate_pipe_a(struct drm_device *dev)
DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
mode = drm_mode_duplicate(dev, &vesa_640x480);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
drm_mode_set_crtcinfo(mode, 0);
if (!drm_crtc_helper_set_mode(&crtc->base, mode,
crtc->base.x, crtc->base.y,
crtc->base.fb))
@@ -937,10 +935,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
{
struct drm_display_mode *mode = &overlay->crtc->base.mode;
if (rec->dst_x < mode->crtc_hdisplay &&
rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
rec->dst_y < mode->crtc_vdisplay &&
rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
if (rec->dst_x < mode->hdisplay &&
rec->dst_x + rec->dst_width <= mode->hdisplay &&
rec->dst_y < mode->vdisplay &&
rec->dst_y + rec->dst_height <= mode->vdisplay)
return 0;
else
return -EINVAL;


@@ -48,7 +48,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
adjusted_mode->clock = fixed_mode->clock;
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
/* adjusted_mode has been preset to be the panel's fixed mode */


@@ -52,20 +52,6 @@ static inline int ring_space(struct intel_ring_buffer *ring)
return space;
}
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno;
seqno = dev_priv->next_seqno;
/* reserve 0 for non-seqno */
if (++dev_priv->next_seqno == 0)
dev_priv->next_seqno = 1;
return seqno;
}
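The callers below switch to i915_gem_next_request_seqno(), which takes the ring instead of the device. Its definition sits outside this hunk; a hedged sketch of the behaviour implied by the series is that the seqno is allocated lazily, once per outstanding request, rather than on every add_request call:

/* Sketch only: outstanding_lazy_request and the global allocator are
 * assumed from the surrounding series, not shown in this diff. */
static u32 next_request_seqno_sketch(struct intel_ring_buffer *ring)
{
        if (ring->outstanding_lazy_request == 0)
                ring->outstanding_lazy_request =
                        allocate_global_seqno(ring->dev);       /* hypothetical */
        return ring->outstanding_lazy_request;
}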
static int
render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
@@ -465,7 +451,7 @@ gen6_add_request(struct intel_ring_buffer *ring,
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
*seqno = i915_gem_get_seqno(ring->dev);
*seqno = i915_gem_next_request_seqno(ring);
update_mboxes(ring, *seqno, mbox1_reg);
update_mboxes(ring, *seqno, mbox2_reg);
@@ -563,8 +549,7 @@ static int
pc_render_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
struct drm_device *dev = ring->dev;
u32 seqno = i915_gem_get_seqno(dev);
u32 seqno = i915_gem_next_request_seqno(ring);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
@@ -598,6 +583,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
@@ -615,8 +601,7 @@ static int
render_ring_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
struct drm_device *dev = ring->dev;
u32 seqno = i915_gem_get_seqno(dev);
u32 seqno = i915_gem_next_request_seqno(ring);
int ret;
ret = intel_ring_begin(ring, 4);
@@ -790,7 +775,7 @@ ring_add_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
seqno = i915_gem_get_seqno(ring->dev);
seqno = i915_gem_next_request_seqno(ring);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
@@ -814,8 +799,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
/* It looks like we need to prevent the gt from suspending while waiting
* for a notify irq, otherwise irqs seem to get lost on at least the
* blt/bsd rings on ivb. */
if (IS_GEN7(dev))
gen6_gt_force_wake_get(dev_priv);
gen6_gt_force_wake_get(dev_priv);
spin_lock(&ring->irq_lock);
if (ring->irq_refcount++ == 0) {
@@ -842,8 +826,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
}
spin_unlock(&ring->irq_lock);
if (IS_GEN7(dev))
gen6_gt_force_wake_put(dev_priv);
gen6_gt_force_wake_put(dev_priv);
}
static bool
@@ -1125,11 +1108,89 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
return 0;
}
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
bool was_interruptible;
int ret;
/* XXX As we have not yet audited all the paths to check that
* they are ready for ERESTARTSYS from intel_ring_begin, do not
* allow us to be interruptible by a signal.
*/
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
ret = i915_wait_request(ring, seqno, true);
dev_priv->mm.interruptible = was_interruptible;
return ret;
}
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
struct drm_i915_gem_request *request;
u32 seqno = 0;
int ret;
i915_gem_retire_requests_ring(ring);
if (ring->last_retired_head != -1) {
ring->head = ring->last_retired_head;
ring->last_retired_head = -1;
ring->space = ring_space(ring);
if (ring->space >= n)
return 0;
}
list_for_each_entry(request, &ring->request_list, list) {
int space;
if (request->tail == -1)
continue;
space = request->tail - (ring->tail + 8);
if (space < 0)
space += ring->size;
if (space >= n) {
seqno = request->seqno;
break;
}
/* Consume this request in case we need more space than
* is available and so need to prevent a race between
* updating last_retired_head and direct reads of
* I915_RING_HEAD. It also provides a nice sanity check.
*/
request->tail = -1;
}
if (seqno == 0)
return -ENOSPC;
ret = intel_ring_wait_seqno(ring, seqno);
if (ret)
return ret;
if (WARN_ON(ring->last_retired_head == -1))
return -ENOSPC;
ring->head = ring->last_retired_head;
ring->last_retired_head = -1;
ring->space = ring_space(ring);
if (WARN_ON(ring->space < n))
return -ENOSPC;
return 0;
}
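The search above rests on one piece of arithmetic: retiring a request frees the ring up to that request's recorded tail, modulo wraparound, minus 8 bytes of slack so head never catches tail exactly. As a standalone sketch (helper name hypothetical):

/* Space gained once the request whose tail is req_tail retires,
 * measured from the current CPU tail, wrapping inside the ring. */
static int space_after_request(u32 req_tail, u32 cur_tail, u32 ring_size)
{
        int space = req_tail - (cur_tail + 8);

        if (space < 0)
                space += ring_size;
        return space;
}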
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
int ret;
u32 head;
/* If the reported head position has wrapped or hasn't advanced,
@@ -1143,6 +1204,10 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return 0;
}
ret = intel_ring_wait_request(ring, n);
if (ret != -ENOSPC)
return ret;
trace_i915_ring_wait_begin(ring);
if (drm_core_check_feature(dev, DRIVER_GEM))
/* With GEM the hangcheck timer should kick us out of the loop,


@@ -46,6 +46,16 @@ struct intel_ring_buffer {
int effective_size;
struct intel_hw_status_page status_page;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
* must have finished processing the request and so we know we
* can advance the ringbuffer up to that position.
*
* last_retired_head is set to -1 after the value is consumed so
* we can detect new retirements.
*/
u32 last_retired_head;
spinlock_t irq_lock;
u32 irq_refcount;
u32 irq_mask;
@@ -193,6 +203,11 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
return ring->tail;
}
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))


@@ -944,7 +944,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
@@ -1985,7 +1984,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
drm_connector_helper_add(&connector->base.base,
&intel_sdvo_connector_helper_funcs);
connector->base.base.interlace_allowed = 0;
connector->base.base.interlace_allowed = 1;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;


@@ -1240,7 +1240,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
int type;
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
drm_mode_set_crtcinfo(&mode, 0);
if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
type = intel_tv_detect_type(intel_tv, connector);


@@ -3222,6 +3222,7 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
return r;
}
r = r600_audio_init(rdev);


@@ -15,6 +15,10 @@ const struct intel_gtt {
unsigned int needs_dmar : 1;
/* Whether we idle the gpu before mapping/unmapping */
unsigned int do_idle_maps : 1;
/* Share the scratch page dma with ppgtts. */
dma_addr_t scratch_page_dma;
/* for ppgtt PDE access */
u32 __iomem *gtt;
} *intel_gtt_get(void);
void intel_gtt_chipset_flush(void);
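The two new fields let per-process GTT code share state with the GTT driver instead of duplicating it. A hedged usage sketch (the caller-side names are hypothetical):

#include <drm/intel-gtt.h>      /* declares intel_gtt_get() */

/* Sketch: a ppgtt points unused PTEs at the shared scratch page and
 * writes page-directory entries through the already-mapped GTT. */
static void ppgtt_use_shared_state(void)
{
        const struct intel_gtt *info = intel_gtt_get();
        dma_addr_t scratch = info->scratch_page_dma;    /* backing for unused PTEs */
        u32 __iomem *gtt = info->gtt;                   /* PDE access window */

        (void)scratch;
        (void)gtt;
}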