Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Synced 2024-11-26 04:40:55 +07:00
commit 5fd5d2b7c5

Merge tag 'drm-misc-fixes-2019-08-02' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

drm-misc-fixes for v5.3-rc3:
- Fix some build errors in drm/bridge.
- Do not build i810 on CONFIG_PREEMPTION.
- Fix cache sync on arm in vgem.
- Allow mapping fb in drm_client only when required, and use it to fix
  bochs fbdev.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEuXvWqAysSYEJGuVH/lWMcqZwE8MFAl1EPe0ACgkQ/lWMcqZw
E8NwlBAAg3698yPzGFKuXQ+LDpcgUA0zL+j2uvpQjmOYsyZCIAkT8gzKP9Z+vySx
ga7mm15uCDE4J3WFYBMIsI3m+r0d3Fdyt2l/r7TTjnNMBE0/X8rtpCb5EWWLuCbf
t6hX4gfwMXz5li7EcQKVq+oeOSuqQV8fPJYgJgdub8ePft3yx3QNDRLBz+BRDap9
NQEwaY0XwYq2PWGx5sNP6Ku8E1tIOzit+UGonkBnYy6RmZ9CPSiR2QIFN3llJ00+
UhGWcr+CXljIj1CkaDJA/f6FDVBo5dbGkhSJUHirB0csDnvq4ena5xKg8FGFAQ3e
RLKMwRbTXKHlTY/wxl9iibUR2wNA0DnQYIeWBBaEGpUtnjmMgzugPWGXWm6+qIZ2
zl2k2cTSJmgubzABY+FSI27MwYpgqT9Bh5pceJitx4/ijojgdN+KzkN5Nlw7XcAt
ejRbIdfMrtN6XRgoNtliBNyfzLccb4ONNQNbvKYTo6pIxbgSnlnHGF+VIxPUvBnN
oEbd7OcjaBEU0eL0gZWAFlFeZlMvqaYk1dlC4wMqPECIDMTJV6F5CyXILq2PVjMj
+i2+5Twqa2L7naOFedn0B6wfaVAiXtR5VZ9lzXc8l5L8KpF46Yajx/ndAn+iGAWC
ERiqwMvhCy6eK9JUbEyhrL+NPiIxTsbgV+VD4cLsFtkh2cR484I=
=oX0Q
-----END PGP SIGNATURE-----

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/af0dc371-16e0-cee8-0d71-4824d44aa973@linux.intel.com
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -394,7 +394,7 @@ config DRM_R128
 config DRM_I810
 	tristate "Intel I810"
 	# !PREEMPT because of missing ioctl locking
-	depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+	depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
 	help
 	  Choose this option if you have an Intel I810 graphics card. If M is
 	  selected, the module will be called i810. AGP support is required
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs)
 	bochs->dev->mode_config.fb_base = bochs->fb_base;
 	bochs->dev->mode_config.preferred_depth = 24;
 	bochs->dev->mode_config.prefer_shadow = 0;
+	bochs->dev->mode_config.prefer_shadow_fbdev = 1;
 	bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
 
 	bochs->dev->mode_config.funcs = &bochs_mode_funcs;
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC
 config DRM_LVDS_ENCODER
 	tristate "Transparent parallel to LVDS encoder support"
 	depends on OF
+	select DRM_KMS_HELPER
 	select DRM_PANEL_BRIDGE
 	help
 	  Support for transparent parallel to LVDS encoders that don't require
@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024
 
 config DRM_TOSHIBA_TC358764
 	tristate "TC358764 DSI/LVDS bridge"
-	depends on DRM && DRM_PANEL
 	depends on OF
 	select DRM_MIPI_DSI
+	select DRM_KMS_HELPER
+	select DRM_PANEL
 	help
 	  Toshiba TC358764 DSI/LVDS bridge driver.
 
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
 	struct drm_device *dev = client->dev;
 	struct drm_client_buffer *buffer;
 	struct drm_gem_object *obj;
-	void *vaddr;
 	int ret;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -281,6 +280,36 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
 
 	buffer->gem = obj;
 
+	return buffer;
+
+err_delete:
+	drm_client_buffer_delete(buffer);
+
+	return ERR_PTR(ret);
+}
+
+/**
+ * drm_client_buffer_vmap - Map DRM client buffer into address space
+ * @buffer: DRM client buffer
+ *
+ * This function maps a client buffer into kernel address space. If the
+ * buffer is already mapped, it returns the mapping's address.
+ *
+ * Client buffer mappings are not ref'counted. Each call to
+ * drm_client_buffer_vmap() should be followed by a call to
+ * drm_client_buffer_vunmap(); or the client buffer should be mapped
+ * throughout its lifetime.
+ *
+ * Returns:
+ *	The mapped memory's address
+ */
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer)
+{
+	void *vaddr;
+
+	if (buffer->vaddr)
+		return buffer->vaddr;
+
 	/*
 	 * FIXME: The dependency on GEM here isn't required, we could
 	 * convert the driver handle to a dma-buf instead and use the
@@ -289,21 +318,30 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
 	 * fd_install step out of the driver backend hooks, to make that
 	 * final step optional for internal users.
 	 */
-	vaddr = drm_gem_vmap(obj);
-	if (IS_ERR(vaddr)) {
-		ret = PTR_ERR(vaddr);
-		goto err_delete;
-	}
+	vaddr = drm_gem_vmap(buffer->gem);
+	if (IS_ERR(vaddr))
+		return vaddr;
 
 	buffer->vaddr = vaddr;
 
-	return buffer;
-
-err_delete:
-	drm_client_buffer_delete(buffer);
-
-	return ERR_PTR(ret);
+	return vaddr;
 }
+EXPORT_SYMBOL(drm_client_buffer_vmap);
+
+/**
+ * drm_client_buffer_vunmap - Unmap DRM client buffer
+ * @buffer: DRM client buffer
+ *
+ * This function removes a client buffer's memory mapping. Calling this
+ * function is only required by clients that manage their buffer mappings
+ * by themselves.
+ */
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
+{
+	drm_gem_vunmap(buffer->gem, buffer->vaddr);
+	buffer->vaddr = NULL;
+}
+EXPORT_SYMBOL(drm_client_buffer_vunmap);
 
 static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
 {
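
The kernel-doc above defines the usage contract: mappings are not reference-counted, so every drm_client_buffer_vmap() must be paired with a drm_client_buffer_vunmap(), unless the client keeps the buffer mapped for its entire lifetime. A minimal sketch of a conforming caller follows; the function name and the memset() fill are illustrative only, assuming a buffer obtained from drm_client_framebuffer_create() so that buffer->fb and buffer->pitch are valid:

/* Hypothetical caller, not part of this patch: map, touch, unmap. */
static int example_clear_client_buffer(struct drm_client_buffer *buffer)
{
	void *vaddr;

	vaddr = drm_client_buffer_vmap(buffer);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* Clear the whole framebuffer through the kernel mapping. */
	memset(vaddr, 0, buffer->pitch * buffer->fb->height);

	drm_client_buffer_vunmap(buffer);

	return 0;
}

This is exactly the pattern the drm_fb_helper diff below adopts in drm_fb_helper_dirty_work().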

--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
 	struct drm_clip_rect *clip = &helper->dirty_clip;
 	struct drm_clip_rect clip_copy;
 	unsigned long flags;
+	void *vaddr;
 
 	spin_lock_irqsave(&helper->dirty_lock, flags);
 	clip_copy = *clip;
@@ -412,10 +413,20 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
 
 	/* call dirty callback only when it has been really touched */
 	if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
+
 		/* Generic fbdev uses a shadow buffer */
-		if (helper->buffer)
+		if (helper->buffer) {
+			vaddr = drm_client_buffer_vmap(helper->buffer);
+			if (IS_ERR(vaddr))
+				return;
 			drm_fb_helper_dirty_blit_real(helper, &clip_copy);
-		helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+		}
+		if (helper->fb->funcs->dirty)
+			helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
+						 &clip_copy, 1);
+
+		if (helper->buffer)
+			drm_client_buffer_vunmap(helper->buffer);
 	}
 }
 
@@ -604,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
 
+static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_framebuffer *fb = fb_helper->fb;
+
+	return dev->mode_config.prefer_shadow_fbdev ||
+	       dev->mode_config.prefer_shadow ||
+	       fb->funcs->dirty;
+}
+
 static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
 				u32 width, u32 height)
 {
@@ -611,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
 	struct drm_clip_rect *clip = &helper->dirty_clip;
 	unsigned long flags;
 
-	if (!helper->fb->funcs->dirty)
+	if (!drm_fbdev_use_shadow_fb(helper))
 		return;
 
 	spin_lock_irqsave(&helper->dirty_lock, flags);
@@ -2178,6 +2199,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 	struct drm_framebuffer *fb;
 	struct fb_info *fbi;
 	u32 format;
+	void *vaddr;
 
 	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
 		      sizes->surface_width, sizes->surface_height,
@@ -2200,16 +2222,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 	fbi->fbops = &drm_fbdev_fb_ops;
 	fbi->screen_size = fb->height * fb->pitches[0];
 	fbi->fix.smem_len = fbi->screen_size;
-	fbi->screen_buffer = buffer->vaddr;
-	/* Shamelessly leak the physical address to user-space */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
-	if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
-		fbi->fix.smem_start =
-			page_to_phys(virt_to_page(fbi->screen_buffer));
-#endif
 
 	drm_fb_helper_fill_info(fbi, fb_helper, sizes);
 
-	if (fb->funcs->dirty) {
+	if (drm_fbdev_use_shadow_fb(fb_helper)) {
 		struct fb_ops *fbops;
 		void *shadow;
 
@@ -2231,6 +2247,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
 		fbi->fbdefio = &drm_fbdev_defio;
 
 		fb_deferred_io_init(fbi);
+	} else {
+		/* buffer is mapped for HW framebuffer */
+		vaddr = drm_client_buffer_vmap(fb_helper->buffer);
+		if (IS_ERR(vaddr))
+			return PTR_ERR(vaddr);
+
+		fbi->screen_buffer = vaddr;
+		/* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+		if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+			fbi->fix.smem_start =
+				page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
 	}
 
 	return 0;

--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -47,10 +47,16 @@ static struct vgem_device {
 	struct platform_device *platform;
 } *vgem_device;
 
+static void sync_and_unpin(struct drm_vgem_gem_object *bo);
+static struct page **pin_and_sync(struct drm_vgem_gem_object *bo);
+
 static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
+	if (!obj->import_attach)
+		sync_and_unpin(vgem_obj);
+
 	kvfree(vgem_obj->pages);
 	mutex_destroy(&vgem_obj->pages_lock);
 
@@ -78,40 +84,15 @@ static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	mutex_lock(&obj->pages_lock);
+	if (!obj->pages)
+		pin_and_sync(obj);
 	if (obj->pages) {
 		get_page(obj->pages[page_offset]);
 		vmf->page = obj->pages[page_offset];
 		ret = 0;
 	}
 	mutex_unlock(&obj->pages_lock);
-	if (ret) {
-		struct page *page;
-
-		page = shmem_read_mapping_page(
-					file_inode(obj->base.filp)->i_mapping,
-					page_offset);
-		if (!IS_ERR(page)) {
-			vmf->page = page;
-			ret = 0;
-		} else switch (PTR_ERR(page)) {
-			case -ENOSPC:
-			case -ENOMEM:
-				ret = VM_FAULT_OOM;
-				break;
-			case -EBUSY:
-				ret = VM_FAULT_RETRY;
-				break;
-			case -EFAULT:
-			case -EINVAL:
-				ret = VM_FAULT_SIGBUS;
-				break;
-			default:
-				WARN_ON(PTR_ERR(page));
-				ret = VM_FAULT_SIGBUS;
-				break;
-		}
-
-	}
 	return ret;
 }
 
@@ -277,32 +258,93 @@ static const struct file_operations vgem_driver_fops = {
 	.release	= drm_release,
 };
 
+/* Called under pages_lock, except in free path (where it can't race): */
+static void sync_and_unpin(struct drm_vgem_gem_object *bo)
+{
+	struct drm_device *dev = bo->base.dev;
+
+	if (bo->table) {
+		dma_sync_sg_for_cpu(dev->dev, bo->table->sgl,
+				    bo->table->nents, DMA_BIDIRECTIONAL);
+		sg_free_table(bo->table);
+		kfree(bo->table);
+		bo->table = NULL;
+	}
+
+	if (bo->pages) {
+		drm_gem_put_pages(&bo->base, bo->pages, true, true);
+		bo->pages = NULL;
+	}
+}
+
+static struct page **pin_and_sync(struct drm_vgem_gem_object *bo)
+{
+	struct drm_device *dev = bo->base.dev;
+	int npages = bo->base.size >> PAGE_SHIFT;
+	struct page **pages;
+	struct sg_table *sgt;
+
+	WARN_ON(!mutex_is_locked(&bo->pages_lock));
+
+	pages = drm_gem_get_pages(&bo->base);
+	if (IS_ERR(pages)) {
+		bo->pages_pin_count--;
+		mutex_unlock(&bo->pages_lock);
+		return pages;
+	}
+
+	sgt = drm_prime_pages_to_sg(pages, npages);
+	if (IS_ERR(sgt)) {
+		dev_err(dev->dev,
+			"failed to allocate sgt: %ld\n",
+			PTR_ERR(bo->table));
+		drm_gem_put_pages(&bo->base, pages, false, false);
+		mutex_unlock(&bo->pages_lock);
+		return ERR_CAST(bo->table);
+	}
+
+	/*
+	 * Flush the object from the CPU cache so that importers
+	 * can rely on coherent indirect access via the exported
+	 * dma-address.
+	 */
+	dma_sync_sg_for_device(dev->dev, sgt->sgl,
+			       sgt->nents, DMA_BIDIRECTIONAL);
+
+	bo->pages = pages;
+	bo->table = sgt;
+
+	return pages;
+}
+
 static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
 {
+	struct page **pages;
+
 	mutex_lock(&bo->pages_lock);
-	if (bo->pages_pin_count++ == 0) {
-		struct page **pages;
-
-		pages = drm_gem_get_pages(&bo->base);
-		if (IS_ERR(pages)) {
-			bo->pages_pin_count--;
-			mutex_unlock(&bo->pages_lock);
-			return pages;
-		}
-
-		bo->pages = pages;
+	if (bo->pages_pin_count++ == 0 && !bo->pages) {
+		pages = pin_and_sync(bo);
+	} else {
+		WARN_ON(!bo->pages);
+		pages = bo->pages;
 	}
 	mutex_unlock(&bo->pages_lock);
 
-	return bo->pages;
+	return pages;
 }
 
 static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
 {
+	/*
+	 * We shouldn't hit this for imported bo's.. in the import
+	 * case we don't own the scatter-table
+	 */
+	WARN_ON(bo->base.import_attach);
+
 	mutex_lock(&bo->pages_lock);
 	if (--bo->pages_pin_count == 0) {
-		drm_gem_put_pages(&bo->base, bo->pages, true, true);
-		bo->pages = NULL;
+		WARN_ON(!bo->table);
+		sync_and_unpin(bo);
 	}
 	mutex_unlock(&bo->pages_lock);
 }
@@ -310,18 +352,12 @@ static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
 static int vgem_prime_pin(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-	long n_pages = obj->size >> PAGE_SHIFT;
 	struct page **pages;
 
 	pages = vgem_pin_pages(bo);
 	if (IS_ERR(pages))
		return PTR_ERR(pages);
 
-	/* Flush the object from the CPU cache so that importers can rely
-	 * on coherent indirect access via the exported dma-address.
-	 */
-	drm_clflush_pages(pages, n_pages);
-
 	return 0;
 }
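
The net effect of the vgem changes: drm_clflush_pages(), which does nothing on non-x86, is replaced by the streaming-DMA API, so cache maintenance also happens on arm. pin_and_sync() hands ownership of the pages to the device with dma_sync_sg_for_device() after building the sg_table, and sync_and_unpin() hands it back to the CPU with dma_sync_sg_for_cpu() before freeing. A stripped-down sketch of that ownership hand-off, with dev and sgt assumed to be set up elsewhere (this is not code from the patch):

#include <linux/dma-mapping.h>

/* Hypothetical helper showing the sync pairing used above. */
static void example_dma_ownership_cycle(struct device *dev,
					struct sg_table *sgt)
{
	/* CPU owns the memory here; fill the pages through the CPU. */

	/* Write back CPU caches before the device reads the memory. */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

	/* ... device DMA may run here ... */

	/* Return ownership before the CPU touches the pages again. */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}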

--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -149,6 +149,8 @@ struct drm_client_buffer {
 struct drm_client_buffer *
 drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
 void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer);
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
 
 int drm_client_modeset_create(struct drm_client_dev *client);
 void drm_client_modeset_free(struct drm_client_dev *client);

--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -852,6 +852,13 @@ struct drm_mode_config {
 	/* dumb ioctl parameters */
 	uint32_t preferred_depth, prefer_shadow;
 
+	/**
+	 * @prefer_shadow_fbdev:
+	 *
+	 * Hint to framebuffer emulation to prefer shadow-fb rendering.
+	 */
+	bool prefer_shadow_fbdev;
+
 	/**
 	 * @quirk_addfb_prefer_xbgr_30bpp:
 	 *
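
With prefer_shadow_fbdev in struct drm_mode_config, a driver opts in to shadow-buffered fbdev emulation with a single assignment at init time, which drm_fbdev_use_shadow_fb() above then picks up; the bochs hunk earlier is the in-tree user. A hypothetical driver-init sketch, with the function name illustrative only (not from this patch):

/* Hypothetical driver snippet: request shadow-fb rendering for fbdev. */
static void example_mode_config_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 0;
	/* New hint from this series; checked by drm_fbdev_use_shadow_fb(). */
	dev->mode_config.prefer_shadow_fbdev = true;
}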