2010-12-15 04:14:24 +07:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2012 Red Hat
|
|
|
|
*
|
|
|
|
* based in parts on udlfb.c:
|
|
|
|
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
|
|
|
|
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
|
|
|
|
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
|
|
|
|
*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
|
|
* License v2. See the file COPYING in the main directory of this archive for
|
|
|
|
* more details.
|
|
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/fb.h>
|
2012-07-30 11:06:55 +07:00
|
|
|
#include <linux/dma-buf.h>
|
2010-12-15 04:14:24 +07:00
|
|
|
|
2012-10-03 00:01:07 +07:00
|
|
|
#include <drm/drmP.h>
|
|
|
|
#include <drm/drm_crtc.h>
|
|
|
|
#include <drm/drm_crtc_helper.h>
|
2010-12-15 04:14:24 +07:00
|
|
|
#include "udl_drv.h"
|
|
|
|
|
2012-10-03 00:01:07 +07:00
|
|
|
#include <drm/drm_fb_helper.h>
|
2010-12-15 04:14:24 +07:00
|
|
|
|
2013-02-07 09:30:25 +07:00
|
|
|
#define DL_DEFIO_WRITE_DELAY    (HZ/20) /* fb_deferred_io.delay in jiffies */

static int fb_defio = 0;  /* Optionally enable experimental fb_defio mmap support */
/* Depth requested for the fbdev console (passed to initial_config below). */
static int fb_bpp = 16;

/* Both knobs are writable at runtime by owner/group via sysfs. */
module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
|
|
|
|
|
|
|
|
/* Per-device fbdev emulation state. */
struct udl_fbdev {
	struct drm_fb_helper helper;	/* generic DRM fbdev helper state */
	struct udl_framebuffer ufb;	/* framebuffer backing the fbdev console */
	int fb_count;			/* number of currently-open /dev/fbN users */
};
|
|
|
|
|
|
|
|
/* Round up/down to a multiple of 'a' (ALIGN assumes a power-of-two 'a';
 * callers here pass sizeof(unsigned long), which satisfies that). */
#define DL_ALIGN_UP(x, a) ALIGN(x, a)
#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)

/** Read the red component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)

/** Read the green component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)

/** Read the blue component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)

/** Return red/green component of a 16 bpp colour number. */
#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)

/** Return green/blue component of a 16 bpp colour number. */
#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)

/** Return 8 bpp colour number from red, green and blue components. */
#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
|
|
|
|
|
|
|
|
#if 0
|
|
|
|
static uint8_t rgb8(uint32_t col)
|
|
|
|
{
|
|
|
|
uint8_t red = DLO_RGB_GETRED(col);
|
|
|
|
uint8_t grn = DLO_RGB_GETGRN(col);
|
|
|
|
uint8_t blu = DLO_RGB_GETBLU(col);
|
|
|
|
|
|
|
|
return DLO_RGB8(red, grn, blu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint16_t rgb16(uint32_t col)
|
|
|
|
{
|
|
|
|
uint8_t red = DLO_RGB_GETRED(col);
|
|
|
|
uint8_t grn = DLO_RGB_GETGRN(col);
|
|
|
|
uint8_t blu = DLO_RGB_GETBLU(col);
|
|
|
|
|
|
|
|
return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
 * Deferred-I/O flush callback: render every dirtied page of the fbdev
 * shadow buffer to the device over USB.
 *
 * NOTE: fb_defio.c is holding info->fbdefio.mutex
 * Touching ANY framebuffer memory that triggers a page fault
 * in fb_defio will cause a deadlock, when it also tries to
 * grab the same mutex.
 */
static void udlfb_dpy_deferred_io(struct fb_info *info,
				  struct list_head *pagelist)
{
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct udl_fbdev *ufbdev = info->par;
	struct drm_device *dev = ufbdev->ufb.base.dev;
	struct udl_device *udl = dev->dev_private;
	struct urb *urb;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	int bytes_rendered = 0;

	/* defio can be toggled off at runtime via the module parameter */
	if (!fb_defio)
		return;

	start_cycles = get_cycles();

	urb = udl_get_urb(dev);
	if (!urb)
		return;

	cmd = urb->transfer_buffer;

	/* walk the written page list and render each to device */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {

		/* source and device offsets are identical here: one full page
		 * starting at the page's byte offset into the framebuffer */
		if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
				     &urb, (char *) info->fix.smem_start,
				     &cmd, cur->index << PAGE_SHIFT,
				     cur->index << PAGE_SHIFT,
				     PAGE_SIZE, &bytes_identical, &bytes_sent))
			goto error;
		bytes_rendered += PAGE_SIZE;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		udl_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		udl_urb_completion(urb);

error:
	/* update transfer statistics even on the early-error path */
	atomic_add(bytes_sent, &udl->bytes_sent);
	atomic_add(bytes_identical, &udl->bytes_identical);
	atomic_add(bytes_rendered, &udl->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &udl->cpu_kcycles_used);
}
|
|
|
|
|
|
|
|
/*
 * Transfer a damaged rectangle of the framebuffer to the device.
 *
 * The rectangle is widened to unsigned-long alignment, merged with any
 * previously stored (deferred) dirty rectangle under fb->dirty_lock, and
 * then rendered line by line over USB. When called from atomic context
 * the damage is only accumulated in fb->{x1,x2,y1,y2} for a later call.
 *
 * Returns 0 on success (including silently skipped cases such as a
 * missing vmap or no free URB) and -EINVAL for out-of-bounds damage.
 */
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
		      int width, int height)
{
	struct drm_device *dev = fb->base.dev;
	struct udl_device *udl = dev->dev_private;
	int i, ret;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	struct urb *urb;
	int aligned_x;
	int bpp = (fb->base.bits_per_pixel / 8);
	int x2, y2;
	bool store_for_later = false;
	unsigned long flags;

	/* framebuffer not currently scanned out by the device */
	if (!fb->active_16)
		return 0;

	/* lazily map the GEM object; give up quietly if that fails */
	if (!fb->obj->vmapping) {
		ret = udl_gem_vmap(fb->obj);
		if (ret == -ENOMEM) {
			DRM_ERROR("failed to vmap fb\n");
			return 0;
		}
		if (!fb->obj->vmapping) {
			DRM_ERROR("failed to vmapping\n");
			return 0;
		}
	}

	/* widen the rect so each line starts/ends on an unsigned long */
	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
	x = aligned_x;

	if ((width <= 0) ||
	    (x + width > fb->base.width) ||
	    (y + height > fb->base.height))
		return -EINVAL;

	/* if we are in atomic just store the info
	   can't test inside spin lock */
	if (in_atomic())
		store_for_later = true;

	x2 = x + width - 1;
	y2 = y + height - 1;

	spin_lock_irqsave(&fb->dirty_lock, flags);

	/* merge with any damage deferred by a previous atomic call */
	if (fb->y1 < y)
		y = fb->y1;
	if (fb->y2 > y2)
		y2 = fb->y2;
	if (fb->x1 < x)
		x = fb->x1;
	if (fb->x2 > x2)
		x2 = fb->x2;

	if (store_for_later) {
		fb->x1 = x;
		fb->x2 = x2;
		fb->y1 = y;
		fb->y2 = y2;
		spin_unlock_irqrestore(&fb->dirty_lock, flags);
		return 0;
	}

	/* reset the stored rect to "empty" (x1/y1 high, x2/y2 low) */
	fb->x1 = fb->y1 = INT_MAX;
	fb->x2 = fb->y2 = 0;

	spin_unlock_irqrestore(&fb->dirty_lock, flags);
	start_cycles = get_cycles();

	urb = udl_get_urb(dev);
	if (!urb)
		return 0;
	cmd = urb->transfer_buffer;

	for (i = y; i <= y2 ; i++) {
		const int line_offset = fb->base.pitches[0] * i;
		const int byte_offset = line_offset + (x * bpp);
		/* device-side layout is tightly packed at fb width */
		const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
		if (udl_render_hline(dev, bpp, &urb,
				     (char *) fb->obj->vmapping,
				     &cmd, byte_offset, dev_byte_offset,
				     (x2 - x + 1) * bpp,
				     &bytes_identical, &bytes_sent))
			goto error;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		ret = udl_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		udl_urb_completion(urb);

error:
	atomic_add(bytes_sent, &udl->bytes_sent);
	atomic_add(bytes_identical, &udl->bytes_identical);
	atomic_add(width*height*bpp, &udl->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &udl->cpu_kcycles_used);

	return 0;
}
|
|
|
|
|
|
|
|
static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
unsigned long start = vma->vm_start;
|
|
|
|
unsigned long size = vma->vm_end - vma->vm_start;
|
|
|
|
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
|
|
|
|
unsigned long page, pos;
|
|
|
|
|
|
|
|
if (offset + size > info->fix.smem_len)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
pos = (unsigned long)info->fix.smem_start + offset;
|
|
|
|
|
|
|
|
pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
|
|
|
|
pos, size);
|
|
|
|
|
|
|
|
while (size > 0) {
|
|
|
|
page = vmalloc_to_pfn((void *)pos);
|
|
|
|
if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
|
|
|
|
return -EAGAIN;
|
|
|
|
|
|
|
|
start += PAGE_SIZE;
|
|
|
|
pos += PAGE_SIZE;
|
|
|
|
if (size > PAGE_SIZE)
|
|
|
|
size -= PAGE_SIZE;
|
|
|
|
else
|
|
|
|
size = 0;
|
|
|
|
}
|
|
|
|
|
mm: kill vma flag VM_RESERVED and mm->reserved_vm counter
A long time ago, in v2.4, VM_RESERVED kept swapout process off VMA,
currently it lost original meaning but still has some effects:
| effect | alternative flags
-+------------------------+---------------------------------------------
1| account as reserved_vm | VM_IO
2| skip in core dump | VM_IO, VM_DONTDUMP
3| do not merge or expand | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP
4| do not mlock | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP
This patch removes reserved_vm counter from mm_struct. Seems like nobody
cares about it, it does not exported into userspace directly, it only
reduces total_vm showed in proc.
Thus VM_RESERVED can be replaced with VM_IO or pair VM_DONTEXPAND | VM_DONTDUMP.
remap_pfn_range() and io_remap_pfn_range() set VM_IO|VM_DONTEXPAND|VM_DONTDUMP.
remap_vmalloc_range() set VM_DONTEXPAND | VM_DONTDUMP.
[akpm@linux-foundation.org: drivers/vfio/pci/vfio_pci.c fixup]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Eric Paris <eparis@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Kentaro Takeda <takedakn@nttdata.co.jp>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Venkatesh Pallipadi <venki@google.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-10-09 06:29:02 +07:00
|
|
|
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
|
2010-12-15 04:14:24 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
|
|
|
|
{
|
|
|
|
struct udl_fbdev *ufbdev = info->par;
|
|
|
|
|
2016-04-28 22:18:31 +07:00
|
|
|
sys_fillrect(info, rect);
|
2010-12-15 04:14:24 +07:00
|
|
|
|
|
|
|
udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
|
|
|
|
rect->height);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
|
|
|
|
{
|
|
|
|
struct udl_fbdev *ufbdev = info->par;
|
|
|
|
|
2016-04-28 22:18:31 +07:00
|
|
|
sys_copyarea(info, region);
|
2010-12-15 04:14:24 +07:00
|
|
|
|
|
|
|
udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
|
|
|
|
region->height);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
|
|
|
|
{
|
|
|
|
struct udl_fbdev *ufbdev = info->par;
|
|
|
|
|
2016-04-28 22:18:31 +07:00
|
|
|
sys_imageblit(info, image);
|
2010-12-15 04:14:24 +07:00
|
|
|
|
|
|
|
udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
|
|
|
|
image->height);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* It's common for several clients to have framebuffer open simultaneously.
|
|
|
|
* e.g. both fbcon and X. Makes things interesting.
|
|
|
|
* Assumes caller is holding info->lock (for open and release at least)
|
|
|
|
*/
|
|
|
|
static int udl_fb_open(struct fb_info *info, int user)
|
|
|
|
{
|
|
|
|
struct udl_fbdev *ufbdev = info->par;
|
|
|
|
struct drm_device *dev = ufbdev->ufb.base.dev;
|
|
|
|
struct udl_device *udl = dev->dev_private;
|
|
|
|
|
|
|
|
/* If the USB device is gone, we don't accept new opens */
|
|
|
|
if (drm_device_is_unplugged(udl->ddev))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
ufbdev->fb_count++;
|
|
|
|
|
|
|
|
if (fb_defio && (info->fbdefio == NULL)) {
|
|
|
|
/* enable defio at last moment if not disabled by client */
|
|
|
|
|
|
|
|
struct fb_deferred_io *fbdefio;
|
|
|
|
|
|
|
|
fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
|
|
|
|
|
|
|
|
if (fbdefio) {
|
|
|
|
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
|
|
|
|
fbdefio->deferred_io = udlfb_dpy_deferred_io;
|
|
|
|
}
|
|
|
|
|
|
|
|
info->fbdefio = fbdefio;
|
|
|
|
fb_deferred_io_init(info);
|
|
|
|
}
|
|
|
|
|
|
|
|
pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
|
|
|
|
info->node, user, info, ufbdev->fb_count);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Assumes caller is holding info->lock mutex (for open and release at least)
 */
static int udl_fb_release(struct fb_info *info, int user)
{
	struct udl_fbdev *ufbdev = info->par;

	ufbdev->fb_count--;

	/* last close: tear down the deferred I/O set up in udl_fb_open() */
	if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
		fb_deferred_io_cleanup(info);
		kfree(info->fbdefio);
		info->fbdefio = NULL;
		/* NOTE(review): this writes through the shared udlfb_ops
		 * struct, so it affects every udl device, not just this
		 * fb_info — confirm this is intended. */
		info->fbops->fb_mmap = udl_fb_mmap;
	}

	pr_warn("released /dev/fb%d user=%d count=%d\n",
		info->node, user, ufbdev->fb_count);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * fbdev entry points: drawing ops go through the udl wrappers above so
 * damage is flushed to the device; mode/pan/blank are handled by the
 * generic DRM fb helper.
 */
static struct fb_ops udlfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = udl_fb_fillrect,
	.fb_copyarea = udl_fb_copyarea,
	.fb_imageblit = udl_fb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
	.fb_mmap = udl_fb_mmap,
	.fb_open = udl_fb_open,
	.fb_release = udl_fb_release,
};
|
|
|
|
|
|
|
|
/*
 * drm_framebuffer_funcs.dirty: flush user-supplied clip rectangles of an
 * active framebuffer to the device, bracketing the reads with dma-buf
 * CPU-access calls when the backing object was imported.
 */
static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
				      struct drm_file *file,
				      unsigned flags, unsigned color,
				      struct drm_clip_rect *clips,
				      unsigned num_clips)
{
	struct udl_framebuffer *ufb = to_udl_fb(fb);
	int i;
	int ret = 0;

	drm_modeset_lock_all(fb->dev);

	/* nothing to flush if this fb is not being scanned out */
	if (!ufb->active_16)
		goto unlock;

	/* imported dma-buf: make the backing storage CPU-readable first */
	if (ufb->obj->base.import_attach) {
		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
					       DMA_FROM_DEVICE);
		if (ret)
			goto unlock;
	}

	for (i = 0; i < num_clips; i++) {
		ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
				  clips[i].x2 - clips[i].x1,
				  clips[i].y2 - clips[i].y1);
		if (ret)
			break;
	}

	/* end CPU access even when a clip failed; its ret is returned */
	if (ufb->obj->base.import_attach) {
		ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
					     DMA_FROM_DEVICE);
	}

 unlock:
	drm_modeset_unlock_all(fb->dev);

	return ret;
}
|
|
|
|
|
|
|
|
static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
|
|
|
|
{
|
|
|
|
struct udl_framebuffer *ufb = to_udl_fb(fb);
|
|
|
|
|
|
|
|
if (ufb->obj)
|
|
|
|
drm_gem_object_unreference_unlocked(&ufb->obj->base);
|
|
|
|
|
|
|
|
drm_framebuffer_cleanup(fb);
|
|
|
|
kfree(ufb);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Framebuffer vtable shared by fbdev-created and user-created fbs. */
static const struct drm_framebuffer_funcs udlfb_funcs = {
	.destroy = udl_user_framebuffer_destroy,
	.dirty = udl_user_framebuffer_dirty,
};
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
udl_framebuffer_init(struct drm_device *dev,
|
|
|
|
struct udl_framebuffer *ufb,
|
2015-11-12 00:11:29 +07:00
|
|
|
const struct drm_mode_fb_cmd2 *mode_cmd,
|
2010-12-15 04:14:24 +07:00
|
|
|
struct udl_gem_object *obj)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2013-02-07 08:19:15 +07:00
|
|
|
spin_lock_init(&ufb->dirty_lock);
|
2010-12-15 04:14:24 +07:00
|
|
|
ufb->obj = obj;
|
|
|
|
drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
|
2012-12-14 05:38:38 +07:00
|
|
|
ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
|
2010-12-15 04:14:24 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-01-22 05:42:49 +07:00
|
|
|
static int udlfb_create(struct drm_fb_helper *helper,
|
2010-12-15 04:14:24 +07:00
|
|
|
struct drm_fb_helper_surface_size *sizes)
|
|
|
|
{
|
2014-09-14 23:40:22 +07:00
|
|
|
struct udl_fbdev *ufbdev =
|
|
|
|
container_of(helper, struct udl_fbdev, helper);
|
2010-12-15 04:14:24 +07:00
|
|
|
struct drm_device *dev = ufbdev->helper.dev;
|
|
|
|
struct fb_info *info;
|
|
|
|
struct drm_framebuffer *fb;
|
|
|
|
struct drm_mode_fb_cmd2 mode_cmd;
|
|
|
|
struct udl_gem_object *obj;
|
|
|
|
uint32_t size;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (sizes->surface_bpp == 24)
|
|
|
|
sizes->surface_bpp = 32;
|
|
|
|
|
|
|
|
mode_cmd.width = sizes->surface_width;
|
|
|
|
mode_cmd.height = sizes->surface_height;
|
|
|
|
mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
|
|
|
|
|
|
|
|
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
|
|
|
|
sizes->surface_depth);
|
|
|
|
|
|
|
|
size = mode_cmd.pitches[0] * mode_cmd.height;
|
|
|
|
size = ALIGN(size, PAGE_SIZE);
|
|
|
|
|
|
|
|
obj = udl_gem_alloc_object(dev, size);
|
|
|
|
if (!obj)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ret = udl_gem_vmap(obj);
|
|
|
|
if (ret) {
|
|
|
|
DRM_ERROR("failed to vmap fb\n");
|
|
|
|
goto out_gfree;
|
|
|
|
}
|
|
|
|
|
2015-07-22 16:28:16 +07:00
|
|
|
info = drm_fb_helper_alloc_fbi(helper);
|
|
|
|
if (IS_ERR(info)) {
|
|
|
|
ret = PTR_ERR(info);
|
2010-12-15 04:14:24 +07:00
|
|
|
goto out_gfree;
|
|
|
|
}
|
|
|
|
info->par = ufbdev;
|
|
|
|
|
|
|
|
ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
|
|
|
|
if (ret)
|
2015-07-22 16:28:16 +07:00
|
|
|
goto out_destroy_fbi;
|
2010-12-15 04:14:24 +07:00
|
|
|
|
|
|
|
fb = &ufbdev->ufb.base;
|
|
|
|
|
|
|
|
ufbdev->helper.fb = fb;
|
|
|
|
|
|
|
|
strcpy(info->fix.id, "udldrmfb");
|
|
|
|
|
|
|
|
info->screen_base = ufbdev->ufb.obj->vmapping;
|
|
|
|
info->fix.smem_len = size;
|
|
|
|
info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
|
|
|
|
|
|
|
|
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
|
|
|
|
info->fbops = &udlfb_ops;
|
|
|
|
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
|
|
|
|
drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
|
|
|
|
|
|
|
|
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
|
|
|
|
fb->width, fb->height,
|
|
|
|
ufbdev->ufb.obj->vmapping);
|
|
|
|
|
|
|
|
return ret;
|
2015-07-22 16:28:16 +07:00
|
|
|
out_destroy_fbi:
|
|
|
|
drm_fb_helper_release_fbi(helper);
|
2010-12-15 04:14:24 +07:00
|
|
|
out_gfree:
|
2016-03-30 16:40:43 +07:00
|
|
|
drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
|
2010-12-15 04:14:24 +07:00
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-06-27 22:19:23 +07:00
|
|
|
/* fbdev helper hooks; only probe is needed, the rest are generic. */
static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
	.fb_probe = udlfb_create,
};
|
|
|
|
|
|
|
|
/*
 * Tear down the fbdev emulation: unregister the fb_info and helper,
 * then remove the driver-private framebuffer and drop the backing
 * GEM object reference. Inverse of udlfb_create()/udl_fbdev_init().
 */
static void udl_fbdev_destroy(struct drm_device *dev,
			      struct udl_fbdev *ufbdev)
{
	drm_fb_helper_unregister_fbi(&ufbdev->helper);
	drm_fb_helper_release_fbi(&ufbdev->helper);
	drm_fb_helper_fini(&ufbdev->helper);
	/* driver-private fb: unregister before cleanup, per drm core rules */
	drm_framebuffer_unregister_private(&ufbdev->ufb.base);
	drm_framebuffer_cleanup(&ufbdev->ufb.base);
	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}
|
|
|
|
|
|
|
|
int udl_fbdev_init(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
struct udl_device *udl = dev->dev_private;
|
|
|
|
int bpp_sel = fb_bpp;
|
|
|
|
struct udl_fbdev *ufbdev;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
|
|
|
|
if (!ufbdev)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
udl->fbdev = ufbdev;
|
2014-06-27 22:19:24 +07:00
|
|
|
|
|
|
|
drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
|
2010-12-15 04:14:24 +07:00
|
|
|
|
|
|
|
ret = drm_fb_helper_init(dev, &ufbdev->helper,
|
|
|
|
1, 1);
|
2014-12-19 17:21:32 +07:00
|
|
|
if (ret)
|
|
|
|
goto free;
|
2010-12-15 04:14:24 +07:00
|
|
|
|
2014-12-19 17:21:32 +07:00
|
|
|
ret = drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
|
|
|
|
if (ret)
|
|
|
|
goto fini;
|
2013-01-21 05:12:54 +07:00
|
|
|
|
|
|
|
/* disable all the possible outputs/crtcs before entering KMS mode */
|
|
|
|
drm_helper_disable_unused_functions(dev);
|
|
|
|
|
2014-12-19 17:21:32 +07:00
|
|
|
ret = drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
|
|
|
|
if (ret)
|
|
|
|
goto fini;
|
|
|
|
|
2010-12-15 04:14:24 +07:00
|
|
|
return 0;
|
2014-12-19 17:21:32 +07:00
|
|
|
|
|
|
|
fini:
|
|
|
|
drm_fb_helper_fini(&ufbdev->helper);
|
|
|
|
free:
|
|
|
|
kfree(ufbdev);
|
|
|
|
return ret;
|
2010-12-15 04:14:24 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
void udl_fbdev_cleanup(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
struct udl_device *udl = dev->dev_private;
|
|
|
|
if (!udl->fbdev)
|
|
|
|
return;
|
|
|
|
|
|
|
|
udl_fbdev_destroy(dev, udl->fbdev);
|
|
|
|
kfree(udl->fbdev);
|
|
|
|
udl->fbdev = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
void udl_fbdev_unplug(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
struct udl_device *udl = dev->dev_private;
|
|
|
|
struct udl_fbdev *ufbdev;
|
|
|
|
if (!udl->fbdev)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ufbdev = udl->fbdev;
|
2015-07-22 16:28:16 +07:00
|
|
|
drm_fb_helper_unlink_fbi(&ufbdev->helper);
|
2010-12-15 04:14:24 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
struct drm_framebuffer *
|
|
|
|
udl_fb_user_fb_create(struct drm_device *dev,
|
|
|
|
struct drm_file *file,
|
2015-11-12 00:11:29 +07:00
|
|
|
const struct drm_mode_fb_cmd2 *mode_cmd)
|
2010-12-15 04:14:24 +07:00
|
|
|
{
|
|
|
|
struct drm_gem_object *obj;
|
|
|
|
struct udl_framebuffer *ufb;
|
|
|
|
int ret;
|
2011-12-21 18:23:44 +07:00
|
|
|
uint32_t size;
|
2010-12-15 04:14:24 +07:00
|
|
|
|
|
|
|
obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
|
|
|
|
if (obj == NULL)
|
|
|
|
return ERR_PTR(-ENOENT);
|
|
|
|
|
2011-12-21 18:23:44 +07:00
|
|
|
size = mode_cmd->pitches[0] * mode_cmd->height;
|
|
|
|
size = ALIGN(size, PAGE_SIZE);
|
|
|
|
|
|
|
|
if (size > obj->size) {
|
|
|
|
DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
2010-12-15 04:14:24 +07:00
|
|
|
ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
|
|
|
|
if (ufb == NULL)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
|
|
|
|
if (ret) {
|
|
|
|
kfree(ufb);
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
return &ufb->base;
|
|
|
|
}
|