/*
 * Copyright 2010 Matt Turner.
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *	    Matt Turner
 *	    Dave Airlie
 */

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

#include "mgag200_drv.h"

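/*
 * drm_framebuffer_funcs.destroy callback: drop the reference on the backing
 * GEM object, then unregister and free the framebuffer itself.
 */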
static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);

	drm_gem_object_unreference_unlocked(mga_fb->obj);
	drm_framebuffer_cleanup(fb);
	kfree(fb);
}

static const struct drm_framebuffer_funcs mga_fb_funcs = {
	.destroy = mga_user_framebuffer_destroy,
};

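/*
 * Fill in an mga_framebuffer from the userspace mode_cmd and register it
 * with the DRM core; also used by the fbdev emulation for its framebuffer.
 */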
int mgag200_framebuffer_init(struct drm_device *dev,
			     struct mga_framebuffer *gfb,
			     const struct drm_mode_fb_cmd2 *mode_cmd,
			     struct drm_gem_object *obj)
{
	int ret;

	drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
	gfb->obj = obj;
	ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
	if (ret) {
		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
		return ret;
	}
	return 0;
}

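/*
 * drm_mode_config_funcs.fb_create callback: look up the GEM object named by
 * the first handle in the mode_cmd and wrap it in a new mga_framebuffer.
 */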
static struct drm_framebuffer *
mgag200_user_framebuffer_create(struct drm_device *dev,
				struct drm_file *filp,
				const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct mga_framebuffer *mga_fb;
	int ret;

	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
	if (!mga_fb) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		kfree(mga_fb);
		return ERR_PTR(ret);
	}
	return &mga_fb->base;
}

static const struct drm_mode_config_funcs mga_mode_funcs = {
	.fb_create = mgag200_user_framebuffer_create,
};

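/*
 * Probe how much of the VRAM aperture is backed by real memory.  Starting at
 * 1 MiB, a 16-bit test pattern is written every 16 KiB; probing stops once
 * the pattern no longer reads back (past the end of memory) or shows up at
 * offset 0 (the address wrapped around).  On G200_EW3 the top 4 MiB of the
 * window is not probed.  The returned size is the offset where probing
 * stopped minus 64 KiB, presumably kept back as a safety margin.
 */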
static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
{
	int offset;
	int orig;
	int test1, test2;
	int orig1, orig2;
	unsigned int vram_size;

	/* Probe */
	orig = ioread16(mem);
	iowrite16(0, mem);

	vram_size = mdev->mc.vram_window;

	if ((mdev->type == G200_EW3) && (vram_size >= 0x1000000)) {
		vram_size = vram_size - 0x400000;
	}

	for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
		orig1 = ioread8(mem + offset);
		orig2 = ioread8(mem + offset + 0x100);

		iowrite16(0xaa55, mem + offset);
		iowrite16(0xaa55, mem + offset + 0x100);

		test1 = ioread16(mem + offset);
		test2 = ioread16(mem);

		iowrite16(orig1, mem + offset);
		iowrite16(orig2, mem + offset + 0x100);

		if (test1 != 0xaa55) {
			break;
		}

		if (test2) {
			break;
		}
	}

	iowrite16(orig, mem);
	return offset - 65536;
}

/* Map the framebuffer from the card and configure the core */
static int mga_vram_init(struct mga_device *mdev)
{
	void __iomem *mem;
	struct apertures_struct *aper = alloc_apertures(1);
	if (!aper)
		return -ENOMEM;

	/* BAR 0 is VRAM */
	mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
	mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);

	aper->ranges[0].base = mdev->mc.vram_base;
	aper->ranges[0].size = mdev->mc.vram_window;

	drm_fb_helper_remove_conflicting_framebuffers(aper, "mgafb", true);
	kfree(aper);

	if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
				     "mgadrmfb_vram")) {
		DRM_ERROR("can't reserve VRAM\n");
		return -ENXIO;
	}

	mem = pci_iomap(mdev->dev->pdev, 0, 0);
	if (!mem)
		return -ENOMEM;

	mdev->mc.vram_size = mga_probe_vram(mdev, mem);

	pci_iounmap(mdev->dev->pdev, mem);

	return 0;
}

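/*
 * One-time device setup: read the OPTION word from PCI config space to tell
 * SDRAM from SGRAM parts, map the register BAR, stash the G200 SE unique
 * revision id for later use and size the VRAM.
 */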
static int mgag200_device_init(struct drm_device *dev,
			       uint32_t flags)
{
	struct mga_device *mdev = dev->dev_private;
	int ret, option;

	mdev->type = flags;

	/* Hardcode the number of CRTCs to 1 */
	mdev->num_crtc = 1;

	pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
	mdev->has_sdram = !(option & (1 << 14));

	/* BAR 0 is the framebuffer, BAR 1 contains registers */
	mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
	mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);

	if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
				     "mgadrmfb_mmio")) {
		DRM_ERROR("can't reserve mmio registers\n");
		return -ENOMEM;
	}

	mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
	if (mdev->rmmio == NULL)
		return -ENOMEM;

	/* stash G200 SE model number for later use */
	if (IS_G200_SE(mdev))
		mdev->unique_rev_id = RREG32(0x1e24);

	ret = mga_vram_init(mdev);
	if (ret)
		return ret;

	mdev->bpp_shifts[0] = 0;
	mdev->bpp_shifts[1] = 1;
	mdev->bpp_shifts[2] = 0;
	mdev->bpp_shifts[3] = 2;
	return 0;
}

/*
 * Functions here will be called by the core once it's bound the driver to
 * a PCI device
 */

int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct mga_device *mdev;
	int r;

	mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
	if (mdev == NULL)
		return -ENOMEM;
	dev->dev_private = (void *)mdev;
	mdev->dev = dev;

	r = mgag200_device_init(dev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
		return r;
	}
	r = mgag200_mm_init(mdev);
	if (r)
		goto err_mm;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = (void *)&mga_mode_funcs;
	if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
		dev->mode_config.preferred_depth = 16;
	else
		dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	r = mgag200_modeset_init(mdev);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
		goto err_modeset;
	}

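	/*
	 * The cursor hardware cannot be turned off and back on without
	 * visible corruption, so cursor image updates are double buffered:
	 * the new image is written into whichever of the two pixel buffers
	 * is idle and the hardware base address is then flipped over to it.
	 */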
	/* Make small buffers to store a hardware cursor (double buffered icon updates) */
	mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
			  &mdev->cursor.pixels_1);
	mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
			  &mdev->cursor.pixels_2);
	if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) {
		mdev->cursor.pixels_1 = NULL;
		mdev->cursor.pixels_2 = NULL;
		dev_warn(&dev->pdev->dev,
			"Could not allocate space for cursors. Not doing hardware cursors.\n");
	} else {
		mdev->cursor.pixels_current = mdev->cursor.pixels_1;
		mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
	}

	return 0;

err_modeset:
	drm_mode_config_cleanup(dev);
	mgag200_mm_fini(mdev);
err_mm:
	dev->dev_private = NULL;

	return r;
}

void mgag200_driver_unload(struct drm_device *dev)
{
	struct mga_device *mdev = dev->dev_private;

	if (mdev == NULL)
		return;
	mgag200_modeset_fini(mdev);
	mgag200_fbdev_fini(mdev);
	drm_mode_config_cleanup(dev);
	mgag200_mm_fini(mdev);
	dev->dev_private = NULL;
}

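/*
 * Allocate a TTM-backed buffer object of at least @size bytes (rounded up to
 * a whole page) and hand back its embedded GEM object.
 */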
int mgag200_gem_create(struct drm_device *dev,
		       u32 size, bool iskernel,
		       struct drm_gem_object **obj)
{
	struct mgag200_bo *astbo;
	int ret;

	*obj = NULL;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	ret = mgag200_bo_create(dev, size, 0, 0, &astbo);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("failed to allocate GEM object\n");
		return ret;
	}
	*obj = &astbo->gem;
	return 0;
}

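/*
 * Dumb-buffer ioctl: derive the pitch from the width and bytes per pixel,
 * allocate a backing GEM object of pitch * height bytes and return a handle
 * to it.
 */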
int mgag200_dumb_create(struct drm_file *file,
			struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	int ret;
	struct drm_gem_object *gobj;
	u32 handle;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = mgag200_gem_create(dev, args->size, false,
				 &gobj);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file, gobj, &handle);
	drm_gem_object_unreference_unlocked(gobj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

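/* Drop the TTM reference on the buffer object and clear the caller's pointer. */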
static void mgag200_bo_unref(struct mgag200_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	*bo = NULL;
}

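/*
 * GEM free callback: the GEM object is embedded in an mgag200_bo, so dropping
 * the TTM reference releases both.
 */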
void mgag200_gem_free_object(struct drm_gem_object *obj)
{
	struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);

	mgag200_bo_unref(&mgag200_bo);
}

static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
}

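/*
 * Report the fake mmap offset of a dumb buffer's GEM object so userspace can
 * map it through the DRM device node.
 */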
int
mgag200_dumb_mmap_offset(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle,
			 uint64_t *offset)
{
	struct drm_gem_object *obj;
	struct mgag200_bo *bo;

	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL)
		return -ENOENT;

	bo = gem_to_mga_bo(obj);
	*offset = mgag200_bo_mmap_offset(bo);

	drm_gem_object_unreference_unlocked(obj);
	return 0;
}