linux_dsm_epyc7002/drivers/gpu/drm/cirrus/cirrus_fbdev.c

/*
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *          Dave Airlie
 */
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
#include "cirrus_drv.h"
static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
				int x, int y, int width, int height)
{
	int i;
	struct drm_gem_object *obj;
	struct cirrus_bo *bo;
	int src_offset, dst_offset;
	int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
	int ret = -EBUSY;
	bool unmap = false;
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;

	obj = afbdev->gfb.obj;
	bo = gem_to_cirrus_bo(obj);

	/*
	 * try and reserve the BO, if we fail with busy
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
	if (drm_can_sleep())
		ret = cirrus_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
			return;
		store_for_later = true;
	}

	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&afbdev->dirty_lock, flags);

	if (afbdev->y1 < y)
		y = afbdev->y1;
	if (afbdev->y2 > y2)
		y2 = afbdev->y2;
	if (afbdev->x1 < x)
		x = afbdev->x1;
	if (afbdev->x2 > x2)
		x2 = afbdev->x2;

	if (store_for_later) {
		afbdev->x1 = x;
		afbdev->x2 = x2;
		afbdev->y1 = y;
		afbdev->y2 = y2;
		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
		return;
	}

	afbdev->x1 = afbdev->y1 = INT_MAX;
	afbdev->x2 = afbdev->y2 = 0;
	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);

	if (!bo->kmap.virtual) {
		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
		if (ret) {
			DRM_ERROR("failed to kmap fb updates\n");
			cirrus_bo_unreserve(bo);
			return;
		}
		unmap = true;
	}

	for (i = y; i < y + height; i++) {
		/* assume equal stride for now */
		src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
	}

	if (unmap)
		ttm_bo_kunmap(&bo->kmap);

	cirrus_bo_unreserve(bo);
}
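
/*
 * fbdev acceleration hooks: draw into the system-memory shadow buffer with
 * the generic drm_fb_helper sys_* implementations, then push the touched
 * rectangle to the hardware with cirrus_dirty_update().
 */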
static void cirrus_fillrect(struct fb_info *info,
			    const struct fb_fillrect *rect)
{
	struct cirrus_fbdev *afbdev = info->par;
	drm_fb_helper_sys_fillrect(info, rect);
	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
			    rect->height);
}

static void cirrus_copyarea(struct fb_info *info,
			    const struct fb_copyarea *area)
{
	struct cirrus_fbdev *afbdev = info->par;
	drm_fb_helper_sys_copyarea(info, area);
	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
			    area->height);
}

static void cirrus_imageblit(struct fb_info *info,
			     const struct fb_image *image)
{
	struct cirrus_fbdev *afbdev = info->par;
	drm_fb_helper_sys_imageblit(info, image);
	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
			    image->height);
}

static struct fb_ops cirrusfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cirrus_fillrect,
	.fb_copyarea = cirrus_copyarea,
	.fb_imageblit = cirrus_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
};
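
/*
 * Validate the requested mode against the device's framebuffer constraints
 * and allocate a GEM-backed buffer object large enough for one frame
 * (pitch * height bytes).
 */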
static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
				  const struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_gem_object **gobj_p)
{
	struct drm_device *dev = afbdev->helper.dev;
	struct cirrus_device *cdev = dev->dev_private;
	u32 bpp, depth;
	u32 size;
	struct drm_gem_object *gobj;
	int ret = 0;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);

	if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
				      bpp, mode_cmd->pitches[0]))
		return -EINVAL;

	size = mode_cmd->pitches[0] * mode_cmd->height;
	ret = cirrus_gem_create(dev, size, true, &gobj);
	if (ret)
		return ret;

	*gobj_p = gobj;
	return ret;
}
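
/*
 * drm_fb_helper .fb_probe callback: create the VRAM buffer object plus a
 * vmalloc'ed shadow copy in system RAM, wrap them in a drm_framebuffer and
 * fill in the fb_info used for the fbdev console.
 */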
static int cirrusfb_create(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct cirrus_fbdev *gfbdev =
		container_of(helper, struct cirrus_fbdev, helper);
	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	void *sysram;
	struct drm_gem_object *gobj = NULL;
	struct cirrus_bo *bo = NULL;
	int size, ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	bo = gem_to_cirrus_bo(gobj);

	sysram = vmalloc(size);
	if (!sysram)
		return -ENOMEM;

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->par = gfbdev;

	ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
	if (ret)
		return ret;

	gfbdev->sysram = sysram;
	gfbdev->size = size;

	fb = &gfbdev->gfb.base;
	if (!fb) {
		DRM_INFO("fb is NULL\n");
		return -EINVAL;
	}

	/* setup helper */
	gfbdev->helper.fb = fb;

	strcpy(info->fix.id, "cirrusdrmfb");

	info->flags = FBINFO_DEFAULT;
	info->fbops = &cirrusfb_ops;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
	info->apertures->ranges[0].size = cdev->mc.vram_size;

	info->fix.smem_start = cdev->dev->mode_config.fb_base;
	info->fix.smem_len = cdev->mc.vram_size;

	info->screen_base = sysram;
	info->screen_size = size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
	DRM_INFO("fb depth is %d\n", fb->depth);
	DRM_INFO(" pitch is %d\n", fb->pitches[0]);
	return 0;
}
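
/*
 * Tear everything down in roughly the reverse order of cirrusfb_create():
 * unregister the fbdev, drop the GEM reference on the backing object, free
 * the shadow buffer, finish the fb helper and finally remove the
 * driver-private framebuffer.
 */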
static int cirrus_fbdev_destroy(struct drm_device *dev,
				struct cirrus_fbdev *gfbdev)
{
	struct cirrus_framebuffer *gfb = &gfbdev->gfb;

	drm_fb_helper_unregister_fbi(&gfbdev->helper);
	drm_fb_helper_release_fbi(&gfbdev->helper);

	if (gfb->obj) {
		drm_gem_object_unreference_unlocked(gfb->obj);
		gfb->obj = NULL;
	}

	vfree(gfbdev->sysram);
	drm_fb_helper_fini(&gfbdev->helper);
	drm_framebuffer_unregister_private(&gfb->base);
	drm_framebuffer_cleanup(&gfb->base);

	return 0;
}

static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
	.gamma_set = cirrus_crtc_fb_gamma_set,
	.gamma_get = cirrus_crtc_fb_gamma_get,
	.fb_probe = cirrusfb_create,
};
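
/*
 * Set up fbdev emulation at driver load: allocate the cirrus_fbdev state,
 * initialise the dirty-rectangle lock, register the fb helper with this
 * device's CRTC/connector limits, and create an initial console
 * configuration (forced to 24 bpp here).
 */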
int cirrus_fbdev_init(struct cirrus_device *cdev)
{
	struct cirrus_fbdev *gfbdev;
	int ret;
	int bpp_sel = 24;

	/*bpp_sel = 8;*/
	gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
	if (!gfbdev)
		return -ENOMEM;

	cdev->mode_info.gfbdev = gfbdev;
	spin_lock_init(&gfbdev->dirty_lock);

	drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
			      &cirrus_fb_helper_funcs);
	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
	if (ret)
		return ret;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(cdev->dev);

	return drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
}

void cirrus_fbdev_fini(struct cirrus_device *cdev)
{
	if (!cdev->mode_info.gfbdev)
		return;

	cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
	kfree(cdev->mode_info.gfbdev);
	cdev->mode_info.gfbdev = NULL;
}