2009-09-16 03:57:34 +07:00
|
|
|
/*
|
|
|
|
* Copyright © 2009
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
|
|
* SOFTWARE.
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Daniel Vetter <daniel@ffwll.ch>
|
|
|
|
*
|
|
|
|
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
|
|
|
|
*/
|
2019-04-29 19:29:31 +07:00
|
|
|
|
2019-01-18 04:03:34 +07:00
|
|
|
#include <drm/drm_fourcc.h>
|
2019-04-29 19:29:31 +07:00
|
|
|
#include <drm/i915_drm.h>
|
2019-01-18 04:03:34 +07:00
|
|
|
|
2019-05-28 16:29:49 +07:00
|
|
|
#include "gem/i915_gem_pm.h"
|
2019-10-24 17:03:44 +07:00
|
|
|
#include "gt/intel_ring.h"
|
2019-05-28 16:29:49 +07:00
|
|
|
|
2009-09-16 03:57:34 +07:00
|
|
|
#include "i915_drv.h"
|
|
|
|
#include "i915_reg.h"
|
2019-08-06 18:39:33 +07:00
|
|
|
#include "intel_display_types.h"
|
2016-08-04 22:32:35 +07:00
|
|
|
#include "intel_frontbuffer.h"
|
2019-04-29 19:29:31 +07:00
|
|
|
#include "intel_overlay.h"
|
2009-09-16 03:57:34 +07:00
|
|
|
|
|
|
|
/* Limits for overlay size. According to intel doc, the real limits are:
|
|
|
|
* Y width: 4095, UV width (planar): 2047, Y height: 2047,
|
|
|
|
* UV width (planar): * 1023. But the xorg thinks 2048 for height and width. Use
|
|
|
|
* the mininum of both. */
|
|
|
|
#define IMAGE_MAX_WIDTH 2048
|
|
|
|
#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
|
|
|
|
/* on 830 and 845 these large limits result in the card hanging */
|
|
|
|
#define IMAGE_MAX_WIDTH_LEGACY 1024
|
|
|
|
#define IMAGE_MAX_HEIGHT_LEGACY 1088
|
|
|
|
|
|
|
|
/* overlay register definitions */
|
|
|
|
/* OCMD register */
|
|
|
|
#define OCMD_TILED_SURFACE (0x1<<19)
|
|
|
|
#define OCMD_MIRROR_MASK (0x3<<17)
|
|
|
|
#define OCMD_MIRROR_MODE (0x3<<17)
|
|
|
|
#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
|
|
|
|
#define OCMD_MIRROR_VERTICAL (0x2<<17)
|
|
|
|
#define OCMD_MIRROR_BOTH (0x3<<17)
|
|
|
|
#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
|
|
|
|
#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
|
|
|
|
#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
|
|
|
|
#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
|
|
|
|
#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
|
|
|
|
#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
|
|
|
|
#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
|
|
|
|
#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
|
|
|
|
#define OCMD_YUV_422_PACKED (0x8<<10)
|
|
|
|
#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
|
|
|
|
#define OCMD_YUV_420_PLANAR (0xc<<10)
|
|
|
|
#define OCMD_YUV_422_PLANAR (0xd<<10)
|
|
|
|
#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
|
|
|
|
#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
|
|
|
|
#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
|
2010-07-13 19:52:17 +07:00
|
|
|
#define OCMD_BUF_TYPE_MASK (0x1<<5)
|
2009-09-16 03:57:34 +07:00
|
|
|
#define OCMD_BUF_TYPE_FRAME (0x0<<5)
|
|
|
|
#define OCMD_BUF_TYPE_FIELD (0x1<<5)
|
|
|
|
#define OCMD_TEST_MODE (0x1<<4)
|
|
|
|
#define OCMD_BUFFER_SELECT (0x3<<2)
|
|
|
|
#define OCMD_BUFFER0 (0x0<<2)
|
|
|
|
#define OCMD_BUFFER1 (0x1<<2)
|
|
|
|
#define OCMD_FIELD_SELECT (0x1<<2)
|
|
|
|
#define OCMD_FIELD0 (0x0<<1)
|
|
|
|
#define OCMD_FIELD1 (0x1<<1)
|
|
|
|
#define OCMD_ENABLE (0x1<<0)
|
|
|
|
|
|
|
|
/* OCONFIG register */
|
|
|
|
#define OCONF_PIPE_MASK (0x1<<18)
|
|
|
|
#define OCONF_PIPE_A (0x0<<18)
|
|
|
|
#define OCONF_PIPE_B (0x1<<18)
|
|
|
|
#define OCONF_GAMMA2_ENABLE (0x1<<16)
|
|
|
|
#define OCONF_CSC_MODE_BT601 (0x0<<5)
|
|
|
|
#define OCONF_CSC_MODE_BT709 (0x1<<5)
|
|
|
|
#define OCONF_CSC_BYPASS (0x1<<4)
|
|
|
|
#define OCONF_CC_OUT_8BIT (0x1<<3)
|
|
|
|
#define OCONF_TEST_MODE (0x1<<2)
|
|
|
|
#define OCONF_THREE_LINE_BUFFER (0x1<<0)
|
|
|
|
#define OCONF_TWO_LINE_BUFFER (0x0<<0)
|
|
|
|
|
|
|
|
/* DCLRKM (dst-key) register */
|
|
|
|
#define DST_KEY_ENABLE (0x1<<31)
|
|
|
|
#define CLK_RGB24_MASK 0x0
|
|
|
|
#define CLK_RGB16_MASK 0x070307
|
|
|
|
#define CLK_RGB15_MASK 0x070707
|
|
|
|
#define CLK_RGB8I_MASK 0xffffff
|
|
|
|
|
|
|
|
#define RGB16_TO_COLORKEY(c) \
|
|
|
|
(((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
|
|
|
|
#define RGB15_TO_COLORKEY(c) \
|
|
|
|
(((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
|
|
|
|
|
|
|
|
/* overlay flip addr flag */
|
|
|
|
#define OFC_UPDATE 0x1
|
|
|
|
|
|
|
|
/* polyphase filter coefficients */
|
|
|
|
#define N_HORIZ_Y_TAPS 5
|
|
|
|
#define N_VERT_Y_TAPS 3
|
|
|
|
#define N_HORIZ_UV_TAPS 3
|
|
|
|
#define N_VERT_UV_TAPS 3
|
|
|
|
#define N_PHASES 17
|
|
|
|
#define MAX_TAPS 5
|
|
|
|
|
|
|
|
/* memory bufferd overlay registers */
|
|
|
|
/*
 * Memory-buffered overlay register file. The layout is fixed by the
 * hardware; the driver fills it through the __iomem mapping kept in
 * intel_overlay->regs and points the overlay at it via flip_addr.
 * Hardware offsets are noted in comments where the layout jumps.
 */
struct overlay_registers {
	u32 OBUF_0Y;
	u32 OBUF_1Y;
	u32 OBUF_0U;
	u32 OBUF_0V;
	u32 OBUF_1U;
	u32 OBUF_1V;
	u32 OSTRIDE;
	u32 YRGB_VPH;
	u32 UV_VPH;
	u32 HORZ_PH;
	u32 INIT_PHS;
	u32 DWINPOS;
	u32 DWINSZ;
	u32 SWIDTH;
	u32 SWIDTHSW;
	u32 SHEIGHT;
	u32 YRGBSCALE;
	u32 UVSCALE;
	u32 OCLRC0;
	u32 OCLRC1;
	u32 DCLRKV;
	u32 DCLRKM;
	u32 SCLRKVH;
	u32 SCLRKVL;
	u32 SCLRKEN;
	u32 OCONFIG;
	u32 OCMD;
	u32 RESERVED1; /* 0x6C */
	u32 OSTART_0Y;
	u32 OSTART_1Y;
	u32 OSTART_0U;
	u32 OSTART_0V;
	u32 OSTART_1U;
	u32 OSTART_1V;
	u32 OTILEOFF_0Y;
	u32 OTILEOFF_1Y;
	u32 OTILEOFF_0U;
	u32 OTILEOFF_0V;
	u32 OTILEOFF_1U;
	u32 OTILEOFF_1V;
	u32 FASTHSCALE; /* 0xA0 */
	u32 UVSCALEV; /* 0xA4 */
	u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
	/* polyphase filter coefficient tables at fixed offsets */
	u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
	u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
	u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
	u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
	u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
	u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
	u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
	u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
};
|
|
|
|
|
2010-08-12 19:53:37 +07:00
|
|
|
/*
 * Software state for the single hardware overlay plane. Flips are
 * serialised through ->last_flip; ->flip_complete, when set by
 * alloc_request(), runs from the i915_active retirement callback.
 */
struct intel_overlay {
	struct drm_i915_private *i915;
	struct intel_context *context;
	struct intel_crtc *crtc;
	struct i915_vma *vma;		/* frame currently being displayed */
	struct i915_vma *old_vma;	/* previous frame, released after the flip */
	bool active;
	bool pfit_active;
	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
	u32 color_key:24;
	u32 color_key_enabled:1;
	u32 brightness, contrast, saturation;
	u32 old_xscale, old_yscale;	/* last programmed scale factors */
	/* register access */
	struct drm_i915_gem_object *reg_bo;
	struct overlay_registers __iomem *regs;
	u32 flip_addr;
	/* flip handling */
	struct i915_active last_flip;
	void (*flip_complete)(struct intel_overlay *ovl);
};
|
2009-09-16 03:57:34 +07:00
|
|
|
|
2016-12-08 00:28:12 +07:00
|
|
|
/*
 * Toggle the clock-gating workarounds required while the overlay is in
 * use on 830/845 ("alm"): overlay-unit clock gating via DSPCLK_GATE_D,
 * and the L2 cache clock gate via PCI device 00.0's config space.
 * enable == true restores normal gating; enable == false applies the
 * workarounds for the duration of overlay use.
 */
static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
				      bool enable)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 val;

	/* WA_OVERLAY_CLKGATE:alm */
	if (enable)
		I915_WRITE(DSPCLK_GATE_D, 0);
	else
		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
	pci_bus_read_config_byte(pdev->bus,
				 PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
	if (enable)
		val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
	else
		val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
	pci_bus_write_config_byte(pdev->bus,
				  PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
}
|
|
|
|
|
2019-08-13 00:48:04 +07:00
|
|
|
static struct i915_request *
|
|
|
|
alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
|
2009-09-16 03:57:34 +07:00
|
|
|
{
|
2019-08-13 00:48:04 +07:00
|
|
|
struct i915_request *rq;
|
|
|
|
int err;
|
2012-09-26 19:47:30 +07:00
|
|
|
|
2019-08-13 00:48:04 +07:00
|
|
|
overlay->flip_complete = fn;
|
2009-09-16 03:57:34 +07:00
|
|
|
|
2019-08-13 00:48:04 +07:00
|
|
|
rq = i915_request_create(overlay->context);
|
|
|
|
if (IS_ERR(rq))
|
|
|
|
return rq;
|
|
|
|
|
drm/i915: Mark i915_request.timeline as a volatile, rcu pointer
The request->timeline is only valid until the request is retired (i.e.
before it is completed). Upon retiring the request, the context may be
unpinned and freed, and along with it the timeline may be freed. We
therefore need to be very careful when chasing rq->timeline that the
pointer does not disappear beneath us. The vast majority of users are in
a protected context, either during request construction or retirement,
where the timeline->mutex is held and the timeline cannot disappear. It
is those few off the beaten path (where we access a second timeline) that
need extra scrutiny -- to be added in the next patch after first adding
the warnings about dangerous access.
One complication, where we cannot use the timeline->mutex itself, is
during request submission onto hardware (under spinlocks). Here, we want
to check on the timeline to finalize the breadcrumb, and so we need to
impose a second rule to ensure that the request->timeline is indeed
valid. As we are submitting the request, it's context and timeline must
be pinned, as it will be used by the hardware. Since it is pinned, we
know the request->timeline must still be valid, and we cannot submit the
idle barrier until after we release the engine->active.lock, ergo while
submitting and holding that spinlock, a second thread cannot release the
timeline.
v2: Don't be lazy inside selftests; hold the timeline->mutex for as long
as we need it, and tidy up acquiring the timeline with a bit of
refactoring (i915_active_add_request)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190919111912.21631-1-chris@chris-wilson.co.uk
2019-09-19 18:19:10 +07:00
|
|
|
err = i915_active_add_request(&overlay->last_flip, rq);
|
2019-08-13 00:48:04 +07:00
|
|
|
if (err) {
|
|
|
|
i915_request_add(rq);
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
return rq;
|
2016-08-03 04:50:26 +07:00
|
|
|
}
|
|
|
|
|
2009-09-16 03:57:34 +07:00
|
|
|
/*
 * Turn the overlay on. The overlay must currently be disabled in the OCMD
 * register. Emits MI_OVERLAY_FLIP(ON) pointing at flip_addr, waits for the
 * flip event, and blocks until the request has retired.
 */
static int intel_overlay_on(struct intel_overlay *overlay)
{
	struct drm_i915_private *dev_priv = overlay->i915;
	struct i915_request *rq;
	u32 *cs;

	WARN_ON(overlay->active);

	rq = alloc_request(overlay, NULL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	overlay->active = true;

	/* 830 needs its clock-gating workarounds while the overlay is on. */
	if (IS_I830(dev_priv))
		i830_overlay_clock_gating(dev_priv, false);

	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
	*cs++ = overlay->flip_addr | OFC_UPDATE;
	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	i915_request_add(rq);

	/* Synchronous: wait for the enable flip to complete. */
	return i915_active_wait(&overlay->last_flip);
}
|
|
|
|
|
2016-12-08 00:28:06 +07:00
|
|
|
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
|
|
|
|
struct i915_vma *vma)
|
|
|
|
{
|
|
|
|
enum pipe pipe = overlay->crtc->pipe;
|
|
|
|
|
|
|
|
WARN_ON(overlay->old_vma);
|
|
|
|
|
2019-08-16 14:46:35 +07:00
|
|
|
intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
|
|
|
|
vma ? vma->obj->frontbuffer : NULL,
|
|
|
|
INTEL_FRONTBUFFER_OVERLAY(pipe));
|
2016-12-08 00:28:06 +07:00
|
|
|
|
|
|
|
intel_frontbuffer_flip_prepare(overlay->i915,
|
|
|
|
INTEL_FRONTBUFFER_OVERLAY(pipe));
|
|
|
|
|
|
|
|
overlay->old_vma = overlay->vma;
|
|
|
|
if (vma)
|
|
|
|
overlay->vma = i915_vma_get(vma);
|
|
|
|
else
|
|
|
|
overlay->vma = NULL;
|
|
|
|
}
|
|
|
|
|
2009-09-16 03:57:34 +07:00
|
|
|
/*
 * Queue a flip to a new frame. The overlay must already be enabled in the
 * OCMD register. Unlike intel_overlay_on/off this does not wait for
 * completion: the flip is merely queued and 0 is returned on success.
 */
static int intel_overlay_continue(struct intel_overlay *overlay,
				  struct i915_vma *vma,
				  bool load_polyphase_filter)
{
	struct drm_i915_private *dev_priv = overlay->i915;
	struct i915_request *rq;
	u32 flip_addr = overlay->flip_addr;
	u32 tmp, *cs;

	WARN_ON(!overlay->active);

	/* OFC_UPDATE asks the hw to reload the filter coefficients. */
	if (load_polyphase_filter)
		flip_addr |= OFC_UPDATE;

	/* check for underruns */
	tmp = I915_READ(DOVSTA);
	if (tmp & (1 << 17))
		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);

	rq = alloc_request(overlay, NULL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
	*cs++ = flip_addr;
	intel_ring_advance(rq, cs);

	intel_overlay_flip_prepare(overlay, vma);
	i915_request_add(rq);

	return 0;
}
|
|
|
|
|
2016-12-08 00:28:06 +07:00
|
|
|
static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
|
2009-09-16 03:57:36 +07:00
|
|
|
{
|
2016-08-15 16:49:01 +07:00
|
|
|
struct i915_vma *vma;
|
2009-09-16 03:57:36 +07:00
|
|
|
|
2016-08-15 16:49:01 +07:00
|
|
|
vma = fetch_and_zero(&overlay->old_vma);
|
|
|
|
if (WARN_ON(!vma))
|
|
|
|
return;
|
2016-08-04 13:52:37 +07:00
|
|
|
|
2016-12-08 00:28:06 +07:00
|
|
|
intel_frontbuffer_flip_complete(overlay->i915,
|
|
|
|
INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
|
2009-09-16 03:57:36 +07:00
|
|
|
|
2016-08-15 16:49:06 +07:00
|
|
|
i915_gem_object_unpin_from_display_plane(vma);
|
2016-08-15 16:49:01 +07:00
|
|
|
i915_vma_put(vma);
|
2010-08-12 20:03:48 +07:00
|
|
|
}
|
2009-09-16 03:57:37 +07:00
|
|
|
|
drm/i915: Pull i915_gem_active into the i915_active family
Looking forward, we need to break the struct_mutex dependency on
i915_gem_active. In the meantime, external use of i915_gem_active is
quite beguiling, little do new users suspect that it implies a barrier
as each request it tracks must be ordered wrt the previous one. As one
of many, it can be used to track activity across multiple timelines, a
shared fence, which fits our unordered request submission much better. We
need to steer external users away from the singular, exclusive fence
imposed by i915_gem_active to i915_active instead. As part of that
process, we move i915_gem_active out of i915_request.c into
i915_active.c to start separating the two concepts, and rename it to
i915_active_request (both to tie it to the concept of tracking just one
request, and to give it a longer, less appealing name).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190205130005.2807-5-chris@chris-wilson.co.uk
2019-02-05 20:00:05 +07:00
|
|
|
/*
 * Retirement callback (installed via alloc_request) that releases the
 * replaced frame once the wait-for-flip request has completed.
 */
static void
intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
	intel_overlay_release_old_vma(overlay);
}
|
|
|
|
|
2019-08-13 00:48:04 +07:00
|
|
|
/*
 * Retirement callback for intel_overlay_off(): release the last frame,
 * detach the overlay from its crtc, and undo the 830 clock-gating
 * workarounds now that the overlay is idle.
 */
static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
	struct drm_i915_private *dev_priv = overlay->i915;

	intel_overlay_release_old_vma(overlay);

	overlay->crtc->overlay = NULL;
	overlay->crtc = NULL;
	overlay->active = false;

	if (IS_I830(dev_priv))
		i830_overlay_clock_gating(dev_priv, true);
}
|
|
|
|
|
2019-08-13 00:48:04 +07:00
|
|
|
static void
|
|
|
|
intel_overlay_last_flip_retire(struct i915_active *active)
|
|
|
|
{
|
|
|
|
struct intel_overlay *overlay =
|
|
|
|
container_of(active, typeof(*overlay), last_flip);
|
|
|
|
|
|
|
|
if (overlay->flip_complete)
|
|
|
|
overlay->flip_complete(overlay);
|
|
|
|
}
|
|
|
|
|
2009-09-16 03:57:34 +07:00
|
|
|
/*
 * Turn the overlay off. The overlay must first be disabled in the OCMD
 * register by the caller; this emits the flip/off sequence, waits for it
 * to retire, and lets intel_overlay_off_tail() tear down the state.
 */
static int intel_overlay_off(struct intel_overlay *overlay)
{
	struct i915_request *rq;
	u32 *cs, flip_addr = overlay->flip_addr;

	WARN_ON(!overlay->active);

	/* According to intel docs the overlay hw may hang (when switching
	 * off) without loading the filter coeffs. It is however unclear whether
	 * this applies to the disabling of the overlay or to the switching off
	 * of the hw. Do it in both cases */
	flip_addr |= OFC_UPDATE;

	rq = alloc_request(overlay, intel_overlay_off_tail);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	/* wait for overlay to go idle */
	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
	*cs++ = flip_addr;
	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;

	/* turn overlay off */
	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
	*cs++ = flip_addr;
	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;

	intel_ring_advance(rq, cs);

	intel_overlay_flip_prepare(overlay, NULL);
	i915_request_add(rq);

	/* Synchronous: wait for the off sequence to retire. */
	return i915_active_wait(&overlay->last_flip);
}
|
|
|
|
|
2009-09-16 03:57:37 +07:00
|
|
|
/*
 * Recover from an interruption due to a signal.
 * We have to be careful not to repeat work forever and to make forward
 * progress: simply wait for the outstanding flip tracked in ->last_flip.
 */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
	return i915_active_wait(&overlay->last_flip);
}
|
|
|
|
|
2009-09-16 03:57:36 +07:00
|
|
|
/*
 * Wait for a pending overlay flip and release the old frame.
 * Needs to be called before the overlay registers are changed
 * via intel_overlay_(un)map_regs.
 */
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
	struct drm_i915_private *dev_priv = overlay->i915;
	struct i915_request *rq;
	u32 *cs;

	/*
	 * Only wait if there is actually an old frame to release to
	 * guarantee forward progress.
	 */
	if (!overlay->old_vma)
		return 0;

	/* No flip still pending in the ISR: release the old frame now. */
	if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
		intel_overlay_release_old_vid_tail(overlay);
		return 0;
	}

	/* Otherwise queue a wait-for-flip; the tail callback releases it. */
	rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	i915_request_add(rq);

	return i915_active_wait(&overlay->last_flip);
}
|
|
|
|
|
2014-11-26 22:07:29 +07:00
|
|
|
void intel_overlay_reset(struct drm_i915_private *dev_priv)
|
|
|
|
{
|
|
|
|
struct intel_overlay *overlay = dev_priv->overlay;
|
|
|
|
|
|
|
|
if (!overlay)
|
|
|
|
return;
|
|
|
|
|
|
|
|
overlay->old_xscale = 0;
|
|
|
|
overlay->old_yscale = 0;
|
|
|
|
overlay->crtc = NULL;
|
|
|
|
overlay->active = false;
|
|
|
|
}
|
|
|
|
|
2009-09-16 03:57:34 +07:00
|
|
|
/*
 * Bytes per 2-pixel group for packed formats. Only packed YUV 4:2:2 is
 * supported (4 bytes); packed 4:1:1 (which would be 6) is not implemented.
 * Returns -EINVAL for anything else.
 */
static int packed_depth_bytes(u32 format)
{
	if ((format & I915_OVERLAY_DEPTH_MASK) == I915_OVERLAY_YUV422)
		return 4;

	return -EINVAL;
}
|
|
|
|
|
|
|
|
/*
 * Row length in bytes for a packed format: YUV 4:2:2 is two bytes per
 * pixel; any other packed format is rejected with -EINVAL.
 */
static int packed_width_bytes(u32 format, short width)
{
	if ((format & I915_OVERLAY_DEPTH_MASK) == I915_OVERLAY_YUV422)
		return width << 1;

	return -EINVAL;
}
|
|
|
|
|
|
|
|
/*
 * Horizontal chroma subsampling factor for the given format flags,
 * or -EINVAL for an unknown depth.
 */
static int uv_hsubsampling(u32 format)
{
	u32 depth = format & I915_OVERLAY_DEPTH_MASK;

	if (depth == I915_OVERLAY_YUV422 || depth == I915_OVERLAY_YUV420)
		return 2;
	if (depth == I915_OVERLAY_YUV411 || depth == I915_OVERLAY_YUV410)
		return 4;

	return -EINVAL;
}
|
|
|
|
|
|
|
|
/*
 * Vertical chroma subsampling factor for the given format flags,
 * or -EINVAL for an unknown depth.
 */
static int uv_vsubsampling(u32 format)
{
	u32 depth = format & I915_OVERLAY_DEPTH_MASK;

	if (depth == I915_OVERLAY_YUV420 || depth == I915_OVERLAY_YUV410)
		return 2;
	if (depth == I915_OVERLAY_YUV422 || depth == I915_OVERLAY_YUV411)
		return 1;

	return -EINVAL;
}
|
|
|
|
|
2016-05-12 18:43:23 +07:00
|
|
|
/*
 * Compute the SWIDTHSW value: the source line width including the
 * misalignment of @offset, rounded up to the per-gen fetch alignment
 * (32 bytes on gen2, 64 on later parts), then encoded as (sw - 32) >> 3.
 */
static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
{
	u32 align = IS_GEN(dev_priv, 2) ? 32 : 64;
	u32 sw = ALIGN((offset & (align - 1)) + width, align);

	return sw ? (sw - 32) >> 3 : 0;
}
|
|
|
|
|
2016-12-08 00:28:10 +07:00
|
|
|
/*
 * Static 5-tap horizontal polyphase filter coefficients for the Y (luma)
 * channel, one row per phase. Uploaded verbatim into regs->Y_HCOEFS by
 * update_polyphase_filter(); values are hardware-defined, do not edit.
 */
static const u16 y_static_hcoeffs[N_PHASES][N_HORIZ_Y_TAPS] = {
	[ 0] = { 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, },
	[ 1] = { 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, },
	[ 2] = { 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, },
	[ 3] = { 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, },
	[ 4] = { 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, },
	[ 5] = { 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, },
	[ 6] = { 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, },
	[ 7] = { 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, },
	[ 8] = { 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, },
	[ 9] = { 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, },
	[10] = { 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, },
	[11] = { 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, },
	[12] = { 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, },
	[13] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, },
	[14] = { 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, },
	[15] = { 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, },
	[16] = { 0xb000, 0x3000, 0x0800, 0x3000, 0xb000, },
};
|
|
|
|
|
2016-12-08 00:28:10 +07:00
|
|
|
/*
 * Static 3-tap horizontal polyphase filter coefficients for the UV
 * (chroma) channels, one row per phase. Uploaded verbatim into
 * regs->UV_HCOEFS by update_polyphase_filter(); hardware-defined values.
 */
static const u16 uv_static_hcoeffs[N_PHASES][N_HORIZ_UV_TAPS] = {
	[ 0] = { 0x3000, 0x1800, 0x1800, },
	[ 1] = { 0xb000, 0x18d0, 0x2e60, },
	[ 2] = { 0xb000, 0x1990, 0x2ce0, },
	[ 3] = { 0xb020, 0x1a68, 0x2b40, },
	[ 4] = { 0xb040, 0x1b20, 0x29e0, },
	[ 5] = { 0xb060, 0x1bd8, 0x2880, },
	[ 6] = { 0xb080, 0x1c88, 0x3e60, },
	[ 7] = { 0xb0a0, 0x1d28, 0x3c00, },
	[ 8] = { 0xb0c0, 0x1db8, 0x39e0, },
	[ 9] = { 0xb0e0, 0x1e40, 0x37e0, },
	[10] = { 0xb100, 0x1eb8, 0x3620, },
	[11] = { 0xb100, 0x1f18, 0x34a0, },
	[12] = { 0xb100, 0x1f68, 0x3360, },
	[13] = { 0xb0e0, 0x1fa8, 0x3240, },
	[14] = { 0xb0c0, 0x1fe0, 0x3140, },
	[15] = { 0xb060, 0x1ff0, 0x30a0, },
	[16] = { 0x3000, 0x0800, 0x3000, },
};
|
2009-09-16 03:57:34 +07:00
|
|
|
|
2012-04-17 04:07:43 +07:00
|
|
|
/*
 * Copy the static Y and UV horizontal filter coefficient tables into the
 * memory-buffered register file (I/O memory, hence memcpy_toio).
 */
static void update_polyphase_filter(struct overlay_registers __iomem *regs)
{
	memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
	memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
		    sizeof(uv_static_hcoeffs));
}
|
|
|
|
|
|
|
|
/*
 * Program YRGBSCALE/UVSCALE/UVSCALEV from the requested source scan size
 * and destination size. Scale factors are 4.12 fixed point; the Y scale
 * is forced to an exact multiple of the UV scale. Returns true when the
 * factors changed from the previously programmed ones, in which case the
 * polyphase filter tables are re-uploaded as well.
 */
static bool update_scaling_factors(struct intel_overlay *overlay,
				   struct overlay_registers __iomem *regs,
				   struct drm_intel_overlay_put_image *params)
{
	/* fixed point with a 12 bit shift */
	u32 xscale, yscale, xscale_UV, yscale_UV;
#define FP_SHIFT 12
#define FRACT_MASK 0xfff
	bool scale_changed = false;
	int uv_hscale = uv_hsubsampling(params->flags);
	int uv_vscale = uv_vsubsampling(params->flags);

	if (params->dst_width > 1)
		xscale = ((params->src_scan_width - 1) << FP_SHIFT) /
			params->dst_width;
	else
		xscale = 1 << FP_SHIFT;

	if (params->dst_height > 1)
		yscale = ((params->src_scan_height - 1) << FP_SHIFT) /
			params->dst_height;
	else
		yscale = 1 << FP_SHIFT;

	/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
	xscale_UV = xscale/uv_hscale;
	yscale_UV = yscale/uv_vscale;
	/* make the Y scale to UV scale ratio an exact multiply */
	xscale = xscale_UV * uv_hscale;
	yscale = yscale_UV * uv_vscale;
	/*} else {
	xscale_UV = 0;
	yscale_UV = 0;
	}*/

	if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
		scale_changed = true;
	overlay->old_xscale = xscale;
	overlay->old_yscale = yscale;

	/* YRGBSCALE: yscale fraction | xscale integer | xscale fraction */
	iowrite32(((yscale & FRACT_MASK) << 20) |
		  ((xscale >> FP_SHIFT) << 16) |
		  ((xscale & FRACT_MASK) << 3),
		  &regs->YRGBSCALE);

	iowrite32(((yscale_UV & FRACT_MASK) << 20) |
		  ((xscale_UV >> FP_SHIFT) << 16) |
		  ((xscale_UV & FRACT_MASK) << 3),
		  &regs->UVSCALE);

	/* UVSCALEV holds the integer parts of both vertical scales. */
	iowrite32((((yscale >> FP_SHIFT) << 16) |
		   ((yscale_UV >> FP_SHIFT) << 0)),
		   &regs->UVSCALEV);

	if (scale_changed)
		update_polyphase_filter(regs);

	return scale_changed;
}
|
|
|
|
|
|
|
|
static void update_colorkey(struct intel_overlay *overlay,
|
2012-04-17 04:07:43 +07:00
|
|
|
struct overlay_registers __iomem *regs)
|
2009-09-16 03:57:34 +07:00
|
|
|
{
|
2016-12-08 00:28:11 +07:00
|
|
|
const struct intel_plane_state *state =
|
|
|
|
to_intel_plane_state(overlay->crtc->base.primary->state);
|
2009-09-16 03:57:34 +07:00
|
|
|
u32 key = overlay->color_key;
|
2016-12-08 00:28:11 +07:00
|
|
|
u32 format = 0;
|
|
|
|
u32 flags = 0;
|
2015-04-02 16:35:08 +07:00
|
|
|
|
|
|
|
if (overlay->color_key_enabled)
|
|
|
|
flags |= DST_KEY_ENABLE;
|
2010-08-12 15:30:58 +07:00
|
|
|
|
2016-12-08 00:28:11 +07:00
|
|
|
if (state->base.visible)
|
2017-01-04 17:41:10 +07:00
|
|
|
format = state->base.fb->format->format;
|
2016-12-08 00:28:11 +07:00
|
|
|
|
|
|
|
switch (format) {
|
|
|
|
case DRM_FORMAT_C8:
|
2015-04-02 16:35:08 +07:00
|
|
|
key = 0;
|
|
|
|
flags |= CLK_RGB8I_MASK;
|
2010-08-12 15:30:58 +07:00
|
|
|
break;
|
2016-12-08 00:28:11 +07:00
|
|
|
case DRM_FORMAT_XRGB1555:
|
|
|
|
key = RGB15_TO_COLORKEY(key);
|
|
|
|
flags |= CLK_RGB15_MASK;
|
2010-08-12 15:30:58 +07:00
|
|
|
break;
|
2016-12-08 00:28:11 +07:00
|
|
|
case DRM_FORMAT_RGB565:
|
|
|
|
key = RGB16_TO_COLORKEY(key);
|
|
|
|
flags |= CLK_RGB16_MASK;
|
|
|
|
break;
|
|
|
|
default:
|
2015-04-02 16:35:08 +07:00
|
|
|
flags |= CLK_RGB24_MASK;
|
2010-08-12 15:30:58 +07:00
|
|
|
break;
|
2009-09-16 03:57:34 +07:00
|
|
|
}
|
2015-04-02 16:35:08 +07:00
|
|
|
|
|
|
|
iowrite32(key, ®s->DCLRKV);
|
|
|
|
iowrite32(flags, ®s->DCLRKM);
|
2009-09-16 03:57:34 +07:00
|
|
|
}
|
|
|
|
|
2018-09-07 02:01:44 +07:00
|
|
|
static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
|
2009-09-16 03:57:34 +07:00
|
|
|
{
|
|
|
|
u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
|
|
|
|
|
2018-09-07 02:01:44 +07:00
|
|
|
if (params->flags & I915_OVERLAY_YUV_PLANAR) {
|
|
|
|
switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
|
2010-08-12 15:28:50 +07:00
|
|
|
case I915_OVERLAY_YUV422:
|
|
|
|
cmd |= OCMD_YUV_422_PLANAR;
|
|
|
|
break;
|
|
|
|
case I915_OVERLAY_YUV420:
|
|
|
|
cmd |= OCMD_YUV_420_PLANAR;
|
|
|
|
break;
|
|
|
|
case I915_OVERLAY_YUV411:
|
|
|
|
case I915_OVERLAY_YUV410:
|
|
|
|
cmd |= OCMD_YUV_410_PLANAR;
|
|
|
|
break;
|
2009-09-16 03:57:34 +07:00
|
|
|
}
|
|
|
|
} else { /* YUV packed */
|
2018-09-07 02:01:44 +07:00
|
|
|
switch (params->flags & I915_OVERLAY_DEPTH_MASK) {
|
2010-08-12 15:28:50 +07:00
|
|
|
case I915_OVERLAY_YUV422:
|
|
|
|
cmd |= OCMD_YUV_422_PACKED;
|
|
|
|
break;
|
|
|
|
case I915_OVERLAY_YUV411:
|
|
|
|
cmd |= OCMD_YUV_411_PACKED;
|
|
|
|
break;
|
2009-09-16 03:57:34 +07:00
|
|
|
}
|
|
|
|
|
2018-09-07 02:01:44 +07:00
|
|
|
switch (params->flags & I915_OVERLAY_SWAP_MASK) {
|
2010-08-12 15:28:50 +07:00
|
|
|
case I915_OVERLAY_NO_SWAP:
|
|
|
|
break;
|
|
|
|
case I915_OVERLAY_UV_SWAP:
|
|
|
|
cmd |= OCMD_UV_SWAP;
|
|
|
|
break;
|
|
|
|
case I915_OVERLAY_Y_SWAP:
|
|
|
|
cmd |= OCMD_Y_SWAP;
|
|
|
|
break;
|
|
|
|
case I915_OVERLAY_Y_AND_UV_SWAP:
|
|
|
|
cmd |= OCMD_Y_AND_UV_SWAP;
|
|
|
|
break;
|
2009-09-16 03:57:34 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return cmd;
|
|
|
|
}
|
|
|
|
|
2010-08-12 18:38:21 +07:00
|
|
|
/*
 * Pin the new buffer, program the full overlay register block for this
 * frame (source geometry, buffer offsets, scaling, colorkey, OCMD) and
 * kick off the hardware update via intel_overlay_continue().
 *
 * Caller must hold the modeset locks (checked via the WARN_ON below).
 * Returns 0 on success or a negative errno; on failure the new buffer
 * is unpinned and the pending_fb_pin count is rebalanced.
 */
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
				      struct drm_i915_gem_object *new_bo,
				      struct drm_intel_overlay_put_image *params)
{
	struct overlay_registers __iomem *regs = overlay->regs;
	struct drm_i915_private *dev_priv = overlay->i915;
	u32 swidth, swidthsw, sheight, ostride;
	enum pipe pipe = overlay->crtc->pipe;
	bool scale_changed = false;
	struct i915_vma *vma;
	int ret, tmp_width;

	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	/* Drop the buffer from the previously completed flip, if any. */
	ret = intel_overlay_release_old_vid(overlay);
	if (ret != 0)
		return ret;

	/* Balanced by the atomic_dec at out_pin_section below. */
	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/* Pin the new buffer for scanout; PIN_MAPPABLE for GGTT access. */
	i915_gem_object_lock(new_bo);
	vma = i915_gem_object_pin_to_display_plane(new_bo,
						   0, NULL, PIN_MAPPABLE);
	i915_gem_object_unlock(new_bo);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_pin_section;
	}
	intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);

	if (!overlay->active) {
		/* First enable: program OCONFIG and switch the overlay on. */
		u32 oconfig;

		oconfig = OCONF_CC_OUT_8BIT;
		if (IS_GEN(dev_priv, 4))
			oconfig |= OCONF_CSC_MODE_BT709;
		oconfig |= pipe == 0 ?
			OCONF_PIPE_A : OCONF_PIPE_B;
		iowrite32(oconfig, &regs->OCONFIG);

		ret = intel_overlay_on(overlay);
		if (ret != 0)
			goto out_unpin;
	}

	/* Destination window position and size. */
	iowrite32(params->dst_y << 16 | params->dst_x, &regs->DWINPOS);
	iowrite32(params->dst_height << 16 | params->dst_width, &regs->DWINSZ);

	/* SWIDTHSW wants a byte width for packed YUV, pixels otherwise. */
	if (params->flags & I915_OVERLAY_YUV_PACKED)
		tmp_width = packed_width_bytes(params->flags,
					       params->src_width);
	else
		tmp_width = params->src_width;

	swidth = params->src_width;
	swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
	sheight = params->src_height;
	iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
	ostride = params->stride_Y;

	if (params->flags & I915_OVERLAY_YUV_PLANAR) {
		int uv_hscale = uv_hsubsampling(params->flags);
		int uv_vscale = uv_vsubsampling(params->flags);
		u32 tmp_U, tmp_V;

		/* UV plane geometry lives in the upper 16 bits. */
		swidth |= (params->src_width / uv_hscale) << 16;
		sheight |= (params->src_height / uv_vscale) << 16;

		/* U and V share the same width; take the larger swidthsw. */
		tmp_U = calc_swidthsw(dev_priv, params->offset_U,
				      params->src_width / uv_hscale);
		tmp_V = calc_swidthsw(dev_priv, params->offset_V,
				      params->src_width / uv_hscale);
		swidthsw |= max(tmp_U, tmp_V) << 16;

		iowrite32(i915_ggtt_offset(vma) + params->offset_U,
			  &regs->OBUF_0U);
		iowrite32(i915_ggtt_offset(vma) + params->offset_V,
			  &regs->OBUF_0V);

		ostride |= params->stride_UV << 16;
	}

	iowrite32(swidth, &regs->SWIDTH);
	iowrite32(swidthsw, &regs->SWIDTHSW);
	iowrite32(sheight, &regs->SHEIGHT);
	iowrite32(ostride, &regs->OSTRIDE);

	scale_changed = update_scaling_factors(overlay, regs, params);

	update_colorkey(overlay, regs);

	/* OCMD last: this is what arms the new configuration. */
	iowrite32(overlay_cmd_reg(params), &regs->OCMD);

	ret = intel_overlay_continue(overlay, vma, scale_changed);
	if (ret)
		goto out_unpin;

	return 0;

out_unpin:
	i915_gem_object_unpin_from_display_plane(vma);
out_pin_section:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	return ret;
}
|
|
|
|
|
2011-02-21 21:43:56 +07:00
|
|
|
int intel_overlay_switch_off(struct intel_overlay *overlay)
|
2009-09-16 03:57:34 +07:00
|
|
|
{
|
2016-05-12 18:43:23 +07:00
|
|
|
struct drm_i915_private *dev_priv = overlay->i915;
|
2010-08-12 19:50:28 +07:00
|
|
|
int ret;
|
2009-09-16 03:57:34 +07:00
|
|
|
|
2016-07-05 16:40:23 +07:00
|
|
|
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
|
2009-09-16 03:57:34 +07:00
|
|
|
|
2011-02-21 21:43:56 +07:00
|
|
|
ret = intel_overlay_recover_from_interrupt(overlay);
|
2010-08-12 20:03:48 +07:00
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
2009-11-30 21:55:49 +07:00
|
|
|
|
2009-09-16 03:57:34 +07:00
|
|
|
if (!overlay->active)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = intel_overlay_release_old_vid(overlay);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
2018-09-07 02:01:43 +07:00
|
|
|
iowrite32(0, &overlay->regs->OCMD);
|
2009-09-16 03:57:34 +07:00
|
|
|
|
2016-08-04 13:52:37 +07:00
|
|
|
return intel_overlay_off(overlay);
|
2009-09-16 03:57:34 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
|
|
|
|
struct intel_crtc *crtc)
|
|
|
|
{
|
2010-09-13 20:19:16 +07:00
|
|
|
if (!crtc->active)
|
2009-09-16 03:57:34 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* can't use the overlay with double wide pipe */
|
2015-01-15 19:55:25 +07:00
|
|
|
if (crtc->config->double_wide)
|
2009-09-16 03:57:34 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
|
|
|
|
{
|
2016-05-12 18:43:23 +07:00
|
|
|
struct drm_i915_private *dev_priv = overlay->i915;
|
2009-09-16 03:57:34 +07:00
|
|
|
u32 pfit_control = I915_READ(PFIT_CONTROL);
|
2010-08-12 17:15:58 +07:00
|
|
|
u32 ratio;
|
2009-09-16 03:57:34 +07:00
|
|
|
|
|
|
|
/* XXX: This is not the same logic as in the xorg driver, but more in
|
2010-08-12 17:15:58 +07:00
|
|
|
* line with the intel documentation for the i965
|
|
|
|
*/
|
2016-05-12 18:43:23 +07:00
|
|
|
if (INTEL_GEN(dev_priv) >= 4) {
|
2011-08-17 02:34:10 +07:00
|
|
|
/* on i965 use the PGM reg to read out the autoscaler values */
|
2010-09-17 06:32:17 +07:00
|
|
|
ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
|
|
|
|
} else {
|
2010-08-12 17:15:58 +07:00
|
|
|
if (pfit_control & VERT_AUTO_SCALE)
|
|
|
|
ratio = I915_READ(PFIT_AUTO_RATIOS);
|
2009-09-16 03:57:34 +07:00
|
|
|
else
|
2010-08-12 17:15:58 +07:00
|
|
|
ratio = I915_READ(PFIT_PGM_RATIOS);
|
|
|
|
ratio >>= PFIT_VERT_SCALE_SHIFT;
|
2009-09-16 03:57:34 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
overlay->pfit_vscale_ratio = ratio;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int check_overlay_dst(struct intel_overlay *overlay,
|
|
|
|
struct drm_intel_overlay_put_image *rec)
|
|
|
|
{
|
2016-12-08 00:28:07 +07:00
|
|
|
const struct intel_crtc_state *pipe_config =
|
|
|
|
overlay->crtc->config;
|
2009-09-16 03:57:34 +07:00
|
|
|
|
2016-12-08 00:28:07 +07:00
|
|
|
if (rec->dst_x < pipe_config->pipe_src_w &&
|
|
|
|
rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
|
|
|
|
rec->dst_y < pipe_config->pipe_src_h &&
|
|
|
|
rec->dst_y + rec->dst_height <= pipe_config->pipe_src_h)
|
2009-09-16 03:57:34 +07:00
|
|
|
return 0;
|
|
|
|
else
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2018-09-07 02:01:44 +07:00
|
|
|
static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
|
2009-09-16 03:57:34 +07:00
|
|
|
{
|
|
|
|
u32 tmp;
|
|
|
|
|
|
|
|
/* downscaling limit is 8.0 */
|
2018-09-07 02:01:44 +07:00
|
|
|
tmp = ((rec->src_scan_height << 16) / rec->dst_height) >> 16;
|
2009-09-16 03:57:34 +07:00
|
|
|
if (tmp > 7)
|
|
|
|
return -EINVAL;
|
2018-09-07 02:01:44 +07:00
|
|
|
|
|
|
|
tmp = ((rec->src_scan_width << 16) / rec->dst_width) >> 16;
|
2009-09-16 03:57:34 +07:00
|
|
|
if (tmp > 7)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-12 18:43:23 +07:00
|
|
|
/*
 * Validate the userspace-supplied source image description against the
 * platform limits and the backing object's size.  Note that for packed
 * YUV this normalizes *rec in place by zeroing the unused UV plane
 * fields.  Returns 0 on success, a negative errno otherwise.
 */
static int check_overlay_src(struct drm_i915_private *dev_priv,
			     struct drm_intel_overlay_put_image *rec,
			     struct drm_i915_gem_object *new_bo)
{
	int uv_hscale = uv_hsubsampling(rec->flags);
	int uv_vscale = uv_vsubsampling(rec->flags);
	u32 stride_mask;
	int depth;
	u32 tmp;

	/* check src dimensions */
	if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
		    rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
			return -EINVAL;
	} else {
		if (rec->src_height > IMAGE_MAX_HEIGHT ||
		    rec->src_width > IMAGE_MAX_WIDTH)
			return -EINVAL;
	}

	/* better safe than sorry, use 4 as the maximal subsampling ratio */
	if (rec->src_height < N_VERT_Y_TAPS*4 ||
	    rec->src_width < N_HORIZ_Y_TAPS*4)
		return -EINVAL;

	/* check alignment constraints */
	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
	case I915_OVERLAY_RGB:
		/* not implemented */
		return -EINVAL;

	case I915_OVERLAY_YUV_PACKED:
		/* Packed layouts carry no vertical chroma subsampling. */
		if (uv_vscale != 1)
			return -EINVAL;

		depth = packed_depth_bytes(rec->flags);
		if (depth < 0)
			return depth;

		/* ignore UV planes */
		rec->stride_UV = 0;
		rec->offset_U = 0;
		rec->offset_V = 0;
		/* check pixel alignment */
		if (rec->offset_Y % depth)
			return -EINVAL;
		break;

	case I915_OVERLAY_YUV_PLANAR:
		if (uv_vscale < 0 || uv_hscale < 0)
			return -EINVAL;
		/* no offset restrictions for planar formats */
		break;

	default:
		return -EINVAL;
	}

	/* NOTE(review): relies on uv_hscale != 0 for valid type flags;
	 * uv_hsubsampling() is not visible here — confirm. */
	if (rec->src_width % uv_hscale)
		return -EINVAL;

	/* stride checking */
	if (IS_I830(dev_priv) || IS_I845G(dev_priv))
		stride_mask = 255;
	else
		stride_mask = 63;

	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
		return -EINVAL;
	/* gen4 additionally requires a minimum Y stride of 512 bytes */
	if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512)
		return -EINVAL;

	/* maximum Y stride: 4k for planar, 8k otherwise; UV capped at 2k */
	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
		4096 : 8192;
	if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
		return -EINVAL;

	/* check buffer dimensions */
	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
	case I915_OVERLAY_RGB:
	case I915_OVERLAY_YUV_PACKED:
		/* always 4 Y values per depth pixels */
		if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
			return -EINVAL;

		/* Y plane must fit inside the backing object. */
		tmp = rec->stride_Y*rec->src_height;
		if (rec->offset_Y + tmp > new_bo->base.size)
			return -EINVAL;
		break;

	case I915_OVERLAY_YUV_PLANAR:
		if (rec->src_width > rec->stride_Y)
			return -EINVAL;
		if (rec->src_width/uv_hscale > rec->stride_UV)
			return -EINVAL;

		/* Y, U and V planes must each fit in the backing object. */
		tmp = rec->stride_Y * rec->src_height;
		if (rec->offset_Y + tmp > new_bo->base.size)
			return -EINVAL;

		tmp = rec->stride_UV * (rec->src_height / uv_vscale);
		if (rec->offset_U + tmp > new_bo->base.size ||
		    rec->offset_V + tmp > new_bo->base.size)
			return -EINVAL;
		break;
	}

	return 0;
}
|
|
|
|
|
2016-05-12 18:43:23 +07:00
|
|
|
/*
 * DRM_IOCTL_I915_OVERLAY_PUT_IMAGE handler: validates the request,
 * (re)binds the overlay to the target CRTC, applies panel fitter
 * compensation, and hands off to intel_overlay_do_put_image().  A
 * request without I915_OVERLAY_ENABLE switches the overlay off instead.
 */
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_intel_overlay_put_image *params = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_overlay *overlay;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;
	struct drm_i915_gem_object *new_bo;
	int ret;

	overlay = dev_priv->overlay;
	if (!overlay) {
		DRM_DEBUG("userspace bug: no overlay\n");
		return -ENODEV;
	}

	/* Disable request: no buffer lookup needed. */
	if (!(params->flags & I915_OVERLAY_ENABLE)) {
		drm_modeset_lock_all(dev);
		ret = intel_overlay_switch_off(overlay);
		drm_modeset_unlock_all(dev);

		return ret;
	}

	drmmode_crtc = drm_crtc_find(dev, file_priv, params->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;
	crtc = to_intel_crtc(drmmode_crtc);

	/* Takes a reference; dropped on both exit paths below. */
	new_bo = i915_gem_object_lookup(file_priv, params->bo_handle);
	if (!new_bo)
		return -ENOENT;

	drm_modeset_lock_all(dev);

	if (i915_gem_object_is_tiled(new_bo)) {
		DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = intel_overlay_recover_from_interrupt(overlay);
	if (ret != 0)
		goto out_unlock;

	if (overlay->crtc != crtc) {
		/* Moving to a new CRTC: switch off first, then rebind. */
		ret = intel_overlay_switch_off(overlay);
		if (ret != 0)
			goto out_unlock;

		ret = check_overlay_possible_on_crtc(overlay, crtc);
		if (ret != 0)
			goto out_unlock;

		overlay->crtc = crtc;
		crtc->overlay = overlay;

		/* line too wide, i.e. one-line-mode */
		if (crtc->config->pipe_src_w > 1024 &&
		    crtc->config->gmch_pfit.control & PFIT_ENABLE) {
			overlay->pfit_active = true;
			update_pfit_vscale_ratio(overlay);
		} else
			overlay->pfit_active = false;
	}

	ret = check_overlay_dst(overlay, params);
	if (ret != 0)
		goto out_unlock;

	if (overlay->pfit_active) {
		/* Compensate dst coordinates for panel fitter downscaling
		 * using the cached 12.x fixed-point vscale ratio. */
		params->dst_y = (((u32)params->dst_y << 12) /
				 overlay->pfit_vscale_ratio);
		/* shifting right rounds downwards, so add 1 */
		params->dst_height = (((u32)params->dst_height << 12) /
				 overlay->pfit_vscale_ratio) + 1;
	}

	/* The scanned-out subrectangle must lie within the source image. */
	if (params->src_scan_height > params->src_height ||
	    params->src_scan_width > params->src_width) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = check_overlay_src(dev_priv, params, new_bo);
	if (ret != 0)
		goto out_unlock;

	/* Check scaling after src size to prevent a divide-by-zero. */
	ret = check_overlay_scaling(params);
	if (ret != 0)
		goto out_unlock;

	ret = intel_overlay_do_put_image(overlay, new_bo, params);
	if (ret != 0)
		goto out_unlock;

	drm_modeset_unlock_all(dev);
	i915_gem_object_put(new_bo);

	return 0;

out_unlock:
	drm_modeset_unlock_all(dev);
	i915_gem_object_put(new_bo);

	return ret;
}
|
|
|
|
|
|
|
|
static void update_reg_attrs(struct intel_overlay *overlay,
|
2012-04-17 04:07:43 +07:00
|
|
|
struct overlay_registers __iomem *regs)
|
2009-09-16 03:57:34 +07:00
|
|
|
{
|
2012-04-17 04:07:43 +07:00
|
|
|
iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
|
|
|
|
®s->OCLRC0);
|
|
|
|
iowrite32(overlay->saturation, ®s->OCLRC1);
|
2009-09-16 03:57:34 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Validate one adjacent pair of gamma ramp entries: the unused top byte
 * of each value must be zero, and every colour channel of @gamma2 must
 * be strictly greater than the corresponding channel of @gamma1.
 */
static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
{
	int shift;

	/* Bits 31:24 are reserved and must be clear in both values. */
	if ((gamma1 | gamma2) & 0xff000000)
		return false;

	/* Each 8-bit channel must be strictly increasing along the ramp. */
	for (shift = 0; shift < 24; shift += 8) {
		u32 c1 = (gamma1 >> shift) & 0xff;
		u32 c2 = (gamma2 >> shift) & 0xff;

		if (c1 >= c2)
			return false;
	}

	return true;
}
|
|
|
|
|
|
|
|
/*
 * Hardware erratum check: no colour channel of the gamma5 entry may be
 * exactly 0x80.
 */
static bool check_gamma5_errata(u32 gamma5)
{
	int shift;

	for (shift = 0; shift < 24; shift += 8) {
		if (((gamma5 >> shift) & 0xff) == 0x80)
			return false;
	}

	return true;
}
|
|
|
|
|
|
|
|
static int check_gamma(struct drm_intel_overlay_attrs *attrs)
|
|
|
|
{
|
2010-08-12 15:28:50 +07:00
|
|
|
if (!check_gamma_bounds(0, attrs->gamma0) ||
|
|
|
|
!check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
|
|
|
|
!check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
|
|
|
|
!check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
|
|
|
|
!check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
|
|
|
|
!check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
|
|
|
|
!check_gamma_bounds(attrs->gamma5, 0x00ffffff))
|
2009-09-16 03:57:34 +07:00
|
|
|
return -EINVAL;
|
2010-08-12 15:28:50 +07:00
|
|
|
|
2009-09-16 03:57:34 +07:00
|
|
|
if (!check_gamma5_errata(attrs->gamma5))
|
|
|
|
return -EINVAL;
|
2010-08-12 15:28:50 +07:00
|
|
|
|
2009-09-16 03:57:34 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-12 18:43:23 +07:00
|
|
|
/*
 * intel_overlay_attrs_ioctl - query or update the overlay colour attributes
 *
 * Without I915_OVERLAY_UPDATE_ATTRS in @attrs->flags this is a query:
 * the current colour key, brightness, contrast, saturation and (on gen3+)
 * the gamma ramp registers are copied back to userspace. With the flag
 * set, the supplied values are range-checked, stored in the overlay state
 * and written out to the hardware register file.
 *
 * Returns 0 on success, -ENODEV if the device has no overlay, -EINVAL on
 * out-of-range values, -EBUSY if a gamma update races an active overlay.
 */
int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_intel_overlay_attrs *attrs = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_overlay *overlay;
	int ret;

	overlay = dev_priv->overlay;
	if (!overlay) {
		DRM_DEBUG("userspace bug: no overlay\n");
		return -ENODEV;
	}

	/* Overlay state is protected by the global modeset locks. */
	drm_modeset_lock_all(dev);

	/* Assume invalid input; the success paths below overwrite this. */
	ret = -EINVAL;
	if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
		/* Query path: report the currently stored attributes. */
		attrs->color_key = overlay->color_key;
		attrs->brightness = overlay->brightness;
		attrs->contrast = overlay->contrast;
		attrs->saturation = overlay->saturation;

		/* Gen2 has no overlay gamma registers. */
		if (!IS_GEN(dev_priv, 2)) {
			attrs->gamma0 = I915_READ(OGAMC0);
			attrs->gamma1 = I915_READ(OGAMC1);
			attrs->gamma2 = I915_READ(OGAMC2);
			attrs->gamma3 = I915_READ(OGAMC3);
			attrs->gamma4 = I915_READ(OGAMC4);
			attrs->gamma5 = I915_READ(OGAMC5);
		}
	} else {
		/* Update path: validate ranges before touching any state. */
		if (attrs->brightness < -128 || attrs->brightness > 127)
			goto out_unlock;
		if (attrs->contrast > 255)
			goto out_unlock;
		if (attrs->saturation > 1023)
			goto out_unlock;

		overlay->color_key = attrs->color_key;
		overlay->brightness = attrs->brightness;
		overlay->contrast = attrs->contrast;
		overlay->saturation = attrs->saturation;

		/* Push the new values into the overlay register file. */
		update_reg_attrs(overlay, overlay->regs);

		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
			/* No gamma support on gen2 -> -EINVAL. */
			if (IS_GEN(dev_priv, 2))
				goto out_unlock;

			/*
			 * The gamma registers cannot be reprogrammed while
			 * the overlay is in use.
			 */
			if (overlay->active) {
				ret = -EBUSY;
				goto out_unlock;
			}

			ret = check_gamma(attrs);
			if (ret)
				goto out_unlock;

			I915_WRITE(OGAMC0, attrs->gamma0);
			I915_WRITE(OGAMC1, attrs->gamma1);
			I915_WRITE(OGAMC2, attrs->gamma2);
			I915_WRITE(OGAMC3, attrs->gamma3);
			I915_WRITE(OGAMC4, attrs->gamma4);
			I915_WRITE(OGAMC5, attrs->gamma5);
		}
	}
	/* Colour keying toggles on both the query and the update path. */
	overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;

	ret = 0;
out_unlock:
	drm_modeset_unlock_all(dev);

	return ret;
}
|
|
|
|
|
2018-09-07 02:01:43 +07:00
|
|
|
/*
 * get_registers - allocate and map the overlay register file
 *
 * Allocates one page for the overlay register file — preferably from
 * stolen memory, falling back to an internal object — pins it into the
 * mappable GGTT aperture and keeps an iomap of it in overlay->regs.
 *
 * @use_phys: when true, program the overlay via the page's physical DMA
 *	address instead of its GGTT offset (platform requirement, see the
 *	OVERLAY_NEEDS_PHYSICAL() caller).
 *
 * Returns 0 on success or a negative error code.
 */
static int get_registers(struct intel_overlay *overlay, bool use_phys)
{
	struct drm_i915_private *i915 = overlay->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	/* Prefer stolen memory; fall back to an internal object. */
	obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_put_bo;
	}

	if (use_phys)
		overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
	else
		overlay->flip_addr = i915_ggtt_offset(vma);
	/* The iomap outlives the pin; only the mapping itself is kept. */
	overlay->regs = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);

	if (IS_ERR(overlay->regs)) {
		err = PTR_ERR(overlay->regs);
		goto err_put_bo;
	}

	overlay->reg_bo = obj;
	return 0;

err_put_bo:
	i915_gem_object_put(obj);
	return err;
}
|
|
|
|
|
2018-11-08 07:16:47 +07:00
|
|
|
/*
 * intel_overlay_setup - initialise overlay support at driver load
 *
 * Allocates the software overlay state, picks sane default colour
 * attributes, maps the hardware register file and publishes the overlay
 * in dev_priv->overlay. Silently does nothing when the platform has no
 * overlay hardware, no render engine, or on allocation failure (the
 * driver simply runs without overlay support).
 */
void intel_overlay_setup(struct drm_i915_private *dev_priv)
{
	struct intel_overlay *overlay;
	int ret;

	if (!HAS_OVERLAY(dev_priv))
		return;

	/* The overlay is driven through requests on the render engine. */
	if (!HAS_ENGINE(dev_priv, RCS0))
		return;

	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return;

	overlay->i915 = dev_priv;
	overlay->context = dev_priv->engine[RCS0]->kernel_context;
	GEM_BUG_ON(!overlay->context);

	/* Default colour attributes (matching the Xorg ddx defaults). */
	overlay->color_key = 0x0101fe;
	overlay->color_key_enabled = true;
	overlay->brightness = -19;
	overlay->contrast = 75;
	overlay->saturation = 146;

	/* Tracks the request for the most recent flip, for sync/retire. */
	i915_active_init(&overlay->last_flip,
			 NULL, intel_overlay_last_flip_retire);

	ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
	if (ret)
		goto out_free;

	/* Start from a clean register file, then apply filter + attrs. */
	memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
	update_polyphase_filter(overlay->regs);
	update_reg_attrs(overlay, overlay->regs);

	dev_priv->overlay = overlay;
	DRM_INFO("Initialized overlay support.\n");
	return;

out_free:
	kfree(overlay);
}
|
|
|
|
|
2018-11-08 07:16:47 +07:00
|
|
|
void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
|
2009-09-16 03:57:34 +07:00
|
|
|
{
|
2018-09-07 02:01:43 +07:00
|
|
|
struct intel_overlay *overlay;
|
|
|
|
|
|
|
|
overlay = fetch_and_zero(&dev_priv->overlay);
|
|
|
|
if (!overlay)
|
2010-08-12 16:50:36 +07:00
|
|
|
return;
|
2009-09-16 03:57:34 +07:00
|
|
|
|
2018-09-07 02:01:43 +07:00
|
|
|
/*
|
|
|
|
* The bo's should be free'd by the generic code already.
|
2010-08-12 16:50:36 +07:00
|
|
|
* Furthermore modesetting teardown happens beforehand so the
|
2018-09-07 02:01:43 +07:00
|
|
|
* hardware should be off already.
|
|
|
|
*/
|
|
|
|
WARN_ON(overlay->active);
|
|
|
|
|
|
|
|
i915_gem_object_put(overlay->reg_bo);
|
2019-08-13 00:48:04 +07:00
|
|
|
i915_active_fini(&overlay->last_flip);
|
2010-08-12 16:50:36 +07:00
|
|
|
|
2018-09-07 02:01:43 +07:00
|
|
|
kfree(overlay);
|
2009-09-16 03:57:34 +07:00
|
|
|
}
|
2010-08-05 02:26:07 +07:00
|
|
|
|
2016-10-12 16:05:18 +07:00
|
|
|
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
|
|
|
|
|
2010-08-05 02:26:07 +07:00
|
|
|
/* Snapshot of the overlay hardware state, taken on GPU error capture. */
struct intel_overlay_error_state {
	struct overlay_registers regs;	/* copy of the register file */
	unsigned long base;		/* flip address (GGTT or physical) */
	u32 dovsta;			/* overlay status register */
	u32 isr;			/* interrupt status at capture time */
};
|
|
|
|
|
|
|
|
struct intel_overlay_error_state *
|
2016-05-06 21:40:21 +07:00
|
|
|
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
|
2010-08-05 02:26:07 +07:00
|
|
|
{
|
|
|
|
struct intel_overlay *overlay = dev_priv->overlay;
|
|
|
|
struct intel_overlay_error_state *error;
|
|
|
|
|
|
|
|
if (!overlay || !overlay->active)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
error = kmalloc(sizeof(*error), GFP_ATOMIC);
|
|
|
|
if (error == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
error->dovsta = I915_READ(DOVSTA);
|
drm/i915: add GEN2_ prefix to the I{E, I, M, S}R registers
This discussion started because we use token pasting in the
GEN{2,3}_IRQ_INIT and GEN{2,3}_IRQ_RESET macros, so gen2-4 passes an
empty argument to those macros, making the code a little weird. The
original proposal was to just add a comment as the empty argument, but
Ville suggested we just add a prefix to the registers, and that indeed
sounds like a more elegant solution.
Now doing this is kinda against our rules for register naming since we
only add gens or platform names as register prefixes when the given
gen/platform changes a register that already existed before. On the
other hand, we have so many instances of IIR/IMR in comments that
adding a prefix would make the users of these register more easily
findable, in addition to make our token pasting macros actually
readable. So IMHO opening an exception here is worth it.
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190410235344.31199-4-paulo.r.zanoni@intel.com
2019-04-11 06:53:42 +07:00
|
|
|
error->isr = I915_READ(GEN2_ISR);
|
2016-04-28 15:56:36 +07:00
|
|
|
error->base = overlay->flip_addr;
|
2010-08-05 02:26:07 +07:00
|
|
|
|
2018-09-07 02:01:43 +07:00
|
|
|
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
|
2010-08-05 02:26:07 +07:00
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Pretty-print a captured overlay error snapshot into the error-state
 * buffer: status/interrupt summary, the base address, then every field
 * of the overlay register file via the P() helper macro.
 */
void
intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_overlay_error_state *error)
{
	i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
			  error->dovsta, error->isr);
	i915_error_printf(m, "  Register file at 0x%08lx:\n",
			  error->base);

/* Dump one register-file field as "  NAME: 0xVALUE". */
#define P(x) i915_error_printf(m, "    " #x ": 0x%08x\n", error->regs.x)
	P(OBUF_0Y);
	P(OBUF_1Y);
	P(OBUF_0U);
	P(OBUF_0V);
	P(OBUF_1U);
	P(OBUF_1V);
	P(OSTRIDE);
	P(YRGB_VPH);
	P(UV_VPH);
	P(HORZ_PH);
	P(INIT_PHS);
	P(DWINPOS);
	P(DWINSZ);
	P(SWIDTH);
	P(SWIDTHSW);
	P(SHEIGHT);
	P(YRGBSCALE);
	P(UVSCALE);
	P(OCLRC0);
	P(OCLRC1);
	P(DCLRKV);
	P(DCLRKM);
	P(SCLRKVH);
	P(SCLRKVL);
	P(SCLRKEN);
	P(OCONFIG);
	P(OCMD);
	P(OSTART_0Y);
	P(OSTART_1Y);
	P(OSTART_0U);
	P(OSTART_0V);
	P(OSTART_1U);
	P(OSTART_1V);
	P(OTILEOFF_0Y);
	P(OTILEOFF_1Y);
	P(OTILEOFF_0U);
	P(OTILEOFF_0V);
	P(OTILEOFF_1U);
	P(OTILEOFF_1V);
	P(FASTHSCALE);
	P(UVSCALEV);
#undef P
}
|
2016-10-12 16:05:18 +07:00
|
|
|
|
|
|
|
#endif
|