Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/ati_pcigart: fix the PCIGART to use drm_pci to allocate GART table.
  drm/radeon: fixup RV550 chip family
  drm/via: attempt again to stabilise the AGP DMA command submission.
  drm: Fix race that can lockup the kernel
commit ff69c00f0a
Author: Linus Torvalds
Date:   2008-03-18 07:32:23 -07:00

9 changed files with 110 additions and 96 deletions

@@ -35,42 +35,23 @@
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
static void *drm_ati_alloc_pcigart_table(int order)
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
unsigned long address;
struct page *page;
int i;
gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
PAGE_SIZE,
gart_info->table_mask);
if (gart_info->table_handle == NULL)
return -ENOMEM;
DRM_DEBUG("%d order\n", order);
address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
order);
if (address == 0UL) {
return NULL;
}
page = virt_to_page(address);
for (i = 0; i < order; i++, page++)
SetPageReserved(page);
DRM_DEBUG("returning 0x%08lx\n", address);
return (void *)address;
return 0;
}
static void drm_ati_free_pcigart_table(void *address, int order)
static void drm_ati_free_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
struct page *page;
int i;
int num_pages = 1 << order;
DRM_DEBUG("\n");
page = virt_to_page((unsigned long)address);
for (i = 0; i < num_pages; i++, page++)
ClearPageReserved(page);
free_pages((unsigned long)address, order);
drm_pci_free(dev, gart_info->table_handle);
gart_info->table_handle = NULL;
}
int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
@@ -78,8 +59,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
struct drm_sg_mem *entry = dev->sg;
unsigned long pages;
int i;
int order;
int num_pages, max_pages;
int max_pages;
/* we need to support large memory configurations */
if (!entry) {
@@ -87,15 +67,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
return 0;
}
order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
num_pages = 1 << order;
if (gart_info->bus_addr) {
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
pci_unmap_single(dev->pdev, gart_info->bus_addr,
num_pages * PAGE_SIZE,
PCI_DMA_TODEVICE);
}
max_pages = (gart_info->table_size / sizeof(u32));
pages = (entry->pages <= max_pages)
@@ -112,10 +84,9 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
gart_info->bus_addr = 0;
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
&& gart_info->addr) {
drm_ati_free_pcigart_table(gart_info->addr, order);
gart_info->addr = NULL;
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
gart_info->table_handle) {
drm_ati_free_pcigart_table(dev, gart_info);
}
return 1;
@@ -127,11 +98,10 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
struct drm_sg_mem *entry = dev->sg;
void *address = NULL;
unsigned long pages;
u32 *pci_gart, page_base, bus_address = 0;
u32 *pci_gart, page_base;
dma_addr_t bus_address = 0;
int i, j, ret = 0;
int order;
int max_pages;
int num_pages;
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
@@ -141,31 +111,14 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
order = drm_order((gart_info->table_size +
(PAGE_SIZE-1)) / PAGE_SIZE);
num_pages = 1 << order;
address = drm_ati_alloc_pcigart_table(order);
if (!address) {
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
if (ret) {
DRM_ERROR("cannot allocate PCI GART page!\n");
goto done;
}
if (!dev->pdev) {
DRM_ERROR("PCI device unknown!\n");
goto done;
}
bus_address = pci_map_single(dev->pdev, address,
num_pages * PAGE_SIZE,
PCI_DMA_TODEVICE);
if (bus_address == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
order = drm_order((gart_info->table_size +
(PAGE_SIZE-1)) / PAGE_SIZE);
drm_ati_free_pcigart_table(address, order);
address = NULL;
goto done;
}
address = gart_info->table_handle->vaddr;
bus_address = gart_info->table_handle->busaddr;
} else {
address = gart_info->addr;
bus_address = gart_info->bus_addr;
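
The core of the ATI PCIGART change above: instead of grabbing pages with __get_free_pages(), reserving each one, and then pci_map_single()-ing the block, the table now comes from drm_pci_alloc(), whose handle carries both a CPU pointer (vaddr) and a bus address (busaddr) and is released again with drm_pci_free(). A minimal sketch of the same idea using the generic coherent-DMA API follows; the helper names are invented for illustration and are not part of the patch.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/* Sketch: allocate a device-visible table in one step.  The coherent API
 * returns the CPU virtual address and fills in the bus address, so no
 * separate page reservation or pci_map_single() pass is needed. */
static int example_alloc_gart_table(struct pci_dev *pdev, size_t table_size,
				    void **vaddr, dma_addr_t *busaddr)
{
	*vaddr = dma_alloc_coherent(&pdev->dev, table_size, busaddr, GFP_KERNEL);
	if (*vaddr == NULL)
		return -ENOMEM;
	return 0;
}

static void example_free_gart_table(struct pci_dev *pdev, size_t table_size,
				    void *vaddr, dma_addr_t busaddr)
{
	dma_free_coherent(&pdev->dev, table_size, vaddr, busaddr);
}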

@@ -54,6 +54,7 @@
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/smp_lock.h> /* For (un)lock_kernel */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
@@ -551,6 +552,8 @@ struct drm_ati_pcigart_info {
int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
dma_addr_t table_mask;
struct drm_dma_handle *table_handle;
drm_local_map_t mapping;
int table_size;
};
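
The two new members above do the bookkeeping for that allocation: table_mask bounds the bus address the table may be placed at, and table_handle stores the allocation handle. Judging only by how the earlier hunk dereferences it (->vaddr and ->busaddr), the handle's essential shape is roughly the following; this is an illustrative mock-up, not the real DRM definition.

#include <linux/types.h>

/* Illustrative only: mirrors how table_handle is used in the hunks above. */
struct example_dma_handle {
	dma_addr_t busaddr;	/* address the device uses to reach the table */
	void *vaddr;		/* address the CPU uses to fill the table */
	size_t size;		/* size of the allocation */
};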

@@ -326,6 +326,7 @@ int drm_release(struct inode *inode, struct file *filp)
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->head->dev;
int retcode = 0;
unsigned long irqflags;
lock_kernel();
@@ -357,9 +358,11 @@ int drm_release(struct inode *inode, struct file *filp)
*/
do{
spin_lock(&dev->lock.spinlock);
spin_lock_irqsave(&dev->lock.spinlock,
irqflags);
locked = dev->lock.idle_has_lock;
spin_unlock(&dev->lock.spinlock);
spin_unlock_irqrestore(&dev->lock.spinlock,
irqflags);
if (locked)
break;
schedule();

@@ -53,6 +53,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
DECLARE_WAITQUEUE(entry, current);
struct drm_lock *lock = data;
int ret = 0;
unsigned long irqflags;
++file_priv->lock_count;
@@ -71,9 +72,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
add_wait_queue(&dev->lock.lock_queue, &entry);
spin_lock(&dev->lock.spinlock);
spin_lock_irqsave(&dev->lock.spinlock, irqflags);
dev->lock.user_waiters++;
spin_unlock(&dev->lock.spinlock);
spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!dev->lock.hw_lock) {
@@ -95,9 +96,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
break;
}
}
spin_lock(&dev->lock.spinlock);
spin_lock_irqsave(&dev->lock.spinlock, irqflags);
dev->lock.user_waiters--;
spin_unlock(&dev->lock.spinlock);
spin_unlock_irqrestore(&dev->lock.spinlock, irqflags);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
@@ -198,8 +199,9 @@ int drm_lock_take(struct drm_lock_data *lock_data,
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned long irqflags;
spin_lock(&lock_data->spinlock);
spin_lock_irqsave(&lock_data->spinlock, irqflags);
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
@@ -211,7 +213,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
}
prev = cmpxchg(lock, old, new);
} while (prev != old);
spin_unlock(&lock_data->spinlock);
spin_unlock_irqrestore(&lock_data->spinlock, irqflags);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
@@ -272,15 +274,16 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned long irqflags;
spin_lock(&lock_data->spinlock);
spin_lock_irqsave(&lock_data->spinlock, irqflags);
if (lock_data->kernel_waiters != 0) {
drm_lock_transfer(lock_data, 0);
lock_data->idle_has_lock = 1;
spin_unlock(&lock_data->spinlock);
spin_unlock_irqrestore(&lock_data->spinlock, irqflags);
return 1;
}
spin_unlock(&lock_data->spinlock);
spin_unlock_irqrestore(&lock_data->spinlock, irqflags);
do {
old = *lock;
@@ -344,19 +347,20 @@ static int drm_notifier(void *priv)
void drm_idlelock_take(struct drm_lock_data *lock_data)
{
int ret = 0;
unsigned long irqflags;
spin_lock(&lock_data->spinlock);
spin_lock_irqsave(&lock_data->spinlock, irqflags);
lock_data->kernel_waiters++;
if (!lock_data->idle_has_lock) {
spin_unlock(&lock_data->spinlock);
spin_unlock_irqrestore(&lock_data->spinlock, irqflags);
ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
spin_lock(&lock_data->spinlock);
spin_lock_irqsave(&lock_data->spinlock, irqflags);
if (ret == 1)
lock_data->idle_has_lock = 1;
}
spin_unlock(&lock_data->spinlock);
spin_unlock_irqrestore(&lock_data->spinlock, irqflags);
}
EXPORT_SYMBOL(drm_idlelock_take);
@@ -364,8 +368,9 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
{
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned long irqflags;
spin_lock(&lock_data->spinlock);
spin_lock_irqsave(&lock_data->spinlock, irqflags);
if (--lock_data->kernel_waiters == 0) {
if (lock_data->idle_has_lock) {
do {
@@ -376,7 +381,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
lock_data->idle_has_lock = 0;
}
}
spin_unlock(&lock_data->spinlock);
spin_unlock_irqrestore(&lock_data->spinlock, irqflags);
}
EXPORT_SYMBOL(drm_idlelock_release);
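
Every lock-handling hunk above makes the same substitution: plain spin_lock()/spin_unlock() on the lock spinlock becomes spin_lock_irqsave()/spin_unlock_irqrestore(). The usual motivation for such a change, and presumably the race fixed here, is that the same spinlock is also taken from interrupt (or tasklet) context: if the interrupt arrives on a CPU that already holds the lock with interrupts enabled, the handler spins on the lock forever and the machine locks up. A self-contained sketch of the pattern, with names invented for the example:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_state;

/* Interrupt-context path: takes the same lock as the process-context path. */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	spin_lock(&example_lock);
	example_state++;
	spin_unlock(&example_lock);
	return IRQ_HANDLED;
}

/* Process-context path: interrupts must stay disabled while the lock is
 * held, otherwise the handler above can run on this CPU and deadlock. */
static void example_update_state(void)
{
	unsigned long irqflags;

	spin_lock_irqsave(&example_lock, irqflags);
	example_state++;
	spin_unlock_irqrestore(&example_lock, irqflags);
}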

@@ -205,9 +205,9 @@
{0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
@@ -238,6 +238,7 @@
{0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0, 0, 0}
#define r128_PCI_IDS \

@@ -558,6 +558,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
#if __OS_HAS_AGP
if (dev_priv->is_pci) {
#endif
dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
dev_priv->gart_info.addr = NULL;
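
The only functional change in the r128 (and, in the next hunk, radeon) init path is filling in the new table_mask before the GART table is allocated. DMA_BIT_MASK(32) is simply the largest 32-bit value, 0xffffffff, so the mask requests a table whose bus address fits in 32 bits, presumably because that is all these chips can program into their GART registers. A trivial illustration of what the mask expresses; the helper name is invented here.

#include <linux/dma-mapping.h>

/* Illustration only: a bus address satisfies a DMA_BIT_MASK(n) constraint
 * when no bits at or above bit n are set. */
static inline int example_addr_fits_mask(dma_addr_t busaddr, u64 mask)
{
	return (busaddr & ~mask) == 0;
}

/* example_addr_fits_mask(addr, DMA_BIT_MASK(32)) is true only for
 * addresses below 4 GB. */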

@@ -1807,6 +1807,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
} else
#endif
{
dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
/* if we have an offset set from userspace */
if (dev_priv->pcigart_offset_set) {
dev_priv->gart_info.bus_addr =

@@ -126,6 +126,8 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
hw_addr, cur_addr, next_addr);
return -1;
}
if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
msleep(1);
} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
return 0;
}
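
The first VIA hunk above adds an msleep(1) to the polling loop in via_cmdbuf_wait(), so the driver yields the CPU while the command regulator catches up instead of spinning flat out. A generic version of that poll-with-backoff idea might look like the following; the helper, register meaning and timeout are invented for illustration.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

/* Sketch: poll a hardware progress register, sleeping 1 ms between reads
 * and giving up after a timeout rather than busy-waiting indefinitely. */
static int example_wait_for_progress(void __iomem *reg, u32 wanted,
				     unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (readl(reg) < wanted) {
		if (time_after(jiffies, deadline))
			return -EBUSY;
		msleep(1);
	}
	return 0;
}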
@@ -416,27 +418,50 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
int paused, count;
volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
uint32_t reader,ptr;
uint32_t diff;
paused = 0;
via_flush_write_combine();
(void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);
*paused_at = pause_addr_lo;
via_flush_write_combine();
(void) *paused_at;
reader = *(dev_priv->hw_addr_ptr);
ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
if ((ptr - reader) <= dev_priv->dma_diff ) {
count = 10000000;
while (!(paused = (VIA_READ(0x41c) & 0x80000000)) && count--);
/*
* There is a possibility that the command reader will
* miss the new pause address and pause on the old one.
* In that case we need to program the new start address
* using PCI.
*/
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
count = 10000000;
while(diff == 0 && count--) {
paused = (VIA_READ(0x41c) & 0x80000000);
if (paused)
break;
reader = *(dev_priv->hw_addr_ptr);
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
}
paused = VIA_READ(0x41c) & 0x80000000;
if (paused && !no_pci_fire) {
reader = *(dev_priv->hw_addr_ptr);
if ((ptr - reader) == dev_priv->dma_diff) {
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
diff &= (dev_priv->dma_high - 1);
if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
DRM_ERROR("Paused at incorrect address. "
"0x%08x, 0x%08x 0x%08x\n",
ptr, reader, dev_priv->dma_diff);
} else if (diff == 0) {
/*
* There is a concern that these writes may stall the PCI bus
* if the GPU is not idle. However, idling the GPU first
@@ -577,6 +602,7 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t jump_addr_lo, jump_addr_hi;
volatile uint32_t *last_pause_ptr;
uint32_t dma_low_save1, dma_low_save2;
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
@@ -603,8 +629,29 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
&pause_addr_lo, 0);
*last_pause_ptr = pause_addr_lo;
dma_low_save1 = dev_priv->dma_low;
via_hook_segment( dev_priv, jump_addr_hi, jump_addr_lo, 0);
/*
* Now, set a trap that will pause the regulator if it tries to rerun the old
* command buffer. (Which may happen if via_hook_segment detects a command regulator pause
* and reissues the jump command over PCI, while the regulator has already taken the jump
* and actually paused at the current buffer end).
* There appears to be no other way to detect this condition, since the hw_addr_pointer
* does not seem to get updated immediately when a jump occurs.
*/
last_pause_ptr =
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0) - 1;
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0);
*last_pause_ptr = pause_addr_lo;
dma_low_save2 = dev_priv->dma_low;
dev_priv->dma_low = dma_low_save1;
via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
dev_priv->dma_low = dma_low_save2;
via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

@@ -603,7 +603,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
* (Not a big limitation anyway.)
*/
if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
DRM_ERROR("Too large system memory stride. Stride: %d, "
"Length: %d\n", xfer->mem_stride, xfer->line_length);
return -EINVAL;