linux_dsm_epyc7002/drivers/gpu/drm/drm_cache.c
Chris Wilson 396f5d62d1 drm: Restore double clflush on the last partial cacheline
This effectively reverts

commit afcd950caf
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Wed Jun 10 15:58:01 2015 +0100

    drm: Avoid the double clflush on the last cache line in drm_clflush_virt_range()

as we have observed issues with serialisation of the clflush operations
on Baytrail+ Atoms with partial updates. Applying the double flush on the
last cacheline forces that clflush to be ordered with respect to the
previous clflush, and the mfence then protects against prefetches crossing
the clflush boundary.

The same issue can be demonstrated in userspace with igt/gem_exec_flush.
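
For illustration only (not part of the commit), the restored pattern looks roughly
like the following standalone user-space sketch. It assumes a 64-byte cacheline and
substitutes the _mm_clflushopt()/_mm_mfence() intrinsics (gcc -mclflushopt) for the
kernel's clflushopt() and mb(); the function and macro names are hypothetical.

    #include <stdint.h>
    #include <immintrin.h>

    #define CACHELINE 64  /* assumed; the kernel reads the real size via cpuid */

    static void flush_virt_range(void *addr, unsigned long length)
    {
            char *p = (char *)((uintptr_t)addr & ~(uintptr_t)(CACHELINE - 1));
            char *end = (char *)addr + length;

            _mm_mfence();                    /* order against earlier stores */
            for (; p < end; p += CACHELINE)
                    _mm_clflushopt(p);       /* flushes are unordered on their own */
            _mm_clflushopt(end - 1);         /* second flush of the last line is
                                              * ordered against the first, so the
                                              * loop cannot drift past this point */
            _mm_mfence();                    /* fence off later loads/prefetches */
    }

clflushopt executions are ordered with respect to one another only when they
reference the same cacheline, which is why the extra flush of end - 1 is enough to
serialise the whole loop ahead of the final fence.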

Fixes: afcd950caf (drm: Avoid the double clflush on the last cache...)
Testcase: igt/gem_concurrent_blit
Testcase: igt/gem_partial_pread_pwrite
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=92845
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: dri-devel@lists.freedesktop.org
Cc: Akash Goel <akash.goel@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jason Ekstrand <jason.ekstrand@intel.com>
Cc: stable@vger.kernel.org
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1467880930-23082-6-git-send-email-chris@chris-wilson.co.uk
2016-07-12 15:57:13 +02:00

/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <drm/drmP.h>

#if defined(CONFIG_X86)
#include <asm/smp.h>

/*
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
 * in the caller.
 */
static void
drm_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;
	const int size = boot_cpu_data.x86_clflush_size;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page);
	for (i = 0; i < PAGE_SIZE; i += size)
		clflushopt(page_virtual + i);
	kunmap_atomic(page_virtual);
}

static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(*pages++);
	mb();
}
#endif

void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	if (wbinvd_on_all_cpus())
		printk(KERN_ERR "Timed out waiting for cache flush.\n");

#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
#else
	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);

void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		struct sg_page_iter sg_iter;

		mb();
		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb();

		return;
	}

	if (wbinvd_on_all_cpus())
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);

void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		const int size = boot_cpu_data.x86_clflush_size;
		void *end = addr + length;

		/* Round down to the start of the first cacheline. */
		addr = (void *)(((unsigned long)addr) & -size);

		mb();
		for (; addr < end; addr += size)
			clflushopt(addr);
		/*
		 * clflushopt is ordered against another clflushopt only when
		 * both reference the same cacheline, so re-flushing the last
		 * line serialises the flushes above before the final mb().
		 */
		clflushopt(end - 1); /* force serialisation */
		mb();

		return;
	}

	if (wbinvd_on_all_cpus())
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);