Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
drm/i915: error capture with no ggtt slot
If the aperture is not available in HW we can't use a ggtt slot and wc copy, so fall back to regular kmap.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191029095856.25431-4-matthew.auld@intel.com
commit 895d8ebeaa
parent cd20c70bb0
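For orientation, below is a minimal, stand-alone C sketch (user space, not kernel code) of the fallback order this patch introduces in i915_error_object_create(): prefer the reserved GGTT error-capture slot with a write-combined mapping, otherwise map local memory (lmem) through its region, otherwise fall back to drm_clflush_pages() plus a plain kmap_atomic() copy. The enum, helper, and boolean parameters are illustrative stand-ins for drm_mm_node_allocated(&ggtt->error_capture) and i915_gem_object_is_lmem(); only the branch order mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; not the kernel APIs. */
enum capture_path { CAPTURE_GGTT_WC, CAPTURE_LMEM_WC, CAPTURE_KMAP };

/*
 * Mirrors the branch order added by the patch:
 *   1. reserved ggtt->error_capture slot -> insert_page + io_mapping_map_wc
 *   2. object backed by local memory     -> io_mapping_map_atomic_wc on the region
 *   3. anything else                     -> drm_clflush_pages + kmap_atomic
 */
static enum capture_path pick_capture_path(bool error_capture_slot_allocated,
                                           bool object_is_lmem)
{
        if (error_capture_slot_allocated)
                return CAPTURE_GGTT_WC;
        if (object_is_lmem)
                return CAPTURE_LMEM_WC;
        return CAPTURE_KMAP;
}

int main(void)
{
        /* No mappable aperture and no lmem: capture falls back to kmap. */
        enum capture_path path = pick_capture_path(false, false);

        /* compress->wc is only set on the two write-combined paths. */
        bool wc = (path != CAPTURE_KMAP);

        printf("path=%d wc=%d\n", path, wc);
        return 0;
}

With the slot reservation now conditional on ggtt->mappable_end, all three paths end in compress_page(), which checks the new c->wc flag before attempting i915_memcpy_from_wc().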
@@ -2661,6 +2661,7 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
 static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
 {
         ggtt_release_guc_top(ggtt);
+        if (drm_mm_node_allocated(&ggtt->error_capture))
                 drm_mm_remove_node(&ggtt->error_capture);
 }
 
@@ -2692,6 +2693,7 @@ static int init_ggtt(struct i915_ggtt *ggtt)
         if (ret)
                 return ret;
 
+        if (ggtt->mappable_end) {
                 /* Reserve a mappable slot for our lockless error capture */
                 ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
                                                   PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
@@ -2699,6 +2701,7 @@ static int init_ggtt(struct i915_ggtt *ggtt)
                                                   DRM_MM_INSERT_LOW);
                 if (ret)
                         return ret;
+        }
 
         /*
          * The upper portion of the GuC address space has a sizeable hole
@@ -40,6 +40,7 @@
 #include "display/intel_overlay.h"
 
 #include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
 
 #include "i915_drv.h"
 #include "i915_gpu_error.h"
@@ -235,6 +236,7 @@ struct compress {
         struct pagevec pool;
         struct z_stream_s zstream;
         void *tmp;
+        bool wc;
 };
 
 static bool compress_init(struct compress *c)
@@ -292,7 +294,7 @@ static int compress_page(struct compress *c,
         struct z_stream_s *zstream = &c->zstream;
 
         zstream->next_in = src;
-        if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+        if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
                 zstream->next_in = c->tmp;
         zstream->avail_in = PAGE_SIZE;
 
@@ -367,6 +369,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
 
 struct compress {
         struct pagevec pool;
+        bool wc;
 };
 
 static bool compress_init(struct compress *c)
@@ -389,7 +392,7 @@ static int compress_page(struct compress *c,
         if (!ptr)
                 return -ENOMEM;
 
-        if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+        if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
                 memcpy(ptr, src, PAGE_SIZE);
         dst->pages[dst->page_count++] = ptr;
 
@@ -966,7 +969,6 @@ i915_error_object_create(struct drm_i915_private *i915,
         struct drm_i915_error_object *dst;
         unsigned long num_pages;
         struct sgt_iter iter;
-        dma_addr_t dma;
         int ret;
 
         might_sleep();
@@ -992,11 +994,17 @@ i915_error_object_create(struct drm_i915_private *i915,
         dst->page_count = 0;
         dst->unused = 0;
 
-        ret = -EINVAL;
-        for_each_sgt_daddr(dma, iter, vma->pages) {
-                void __iomem *s;
+        compress->wc = i915_gem_object_is_lmem(vma->obj) ||
+                       drm_mm_node_allocated(&ggtt->error_capture);
 
-                ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+        ret = -EINVAL;
+        if (drm_mm_node_allocated(&ggtt->error_capture)) {
+                void __iomem *s;
+                dma_addr_t dma;
+
+                for_each_sgt_daddr(dma, iter, vma->pages) {
+                        ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+                                             I915_CACHE_NONE, 0);
 
                         s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
                         ret = compress_page(compress, (void __force *)s, dst);
@@ -1004,6 +1012,37 @@ i915_error_object_create(struct drm_i915_private *i915,
                         if (ret)
                                 break;
                 }
+        } else if (i915_gem_object_is_lmem(vma->obj)) {
+                struct intel_memory_region *mem = vma->obj->mm.region;
+                dma_addr_t dma;
+
+                for_each_sgt_daddr(dma, iter, vma->pages) {
+                        void __iomem *s;
+
+                        s = io_mapping_map_atomic_wc(&mem->iomap, dma);
+                        ret = compress_page(compress, (void __force *)s, dst);
+                        io_mapping_unmap_atomic(s);
+                        if (ret)
+                                break;
+                }
+        } else {
+                struct page *page;
+
+                for_each_sgt_page(page, iter, vma->pages) {
+                        void *s;
+
+                        drm_clflush_pages(&page, 1);
+
+                        s = kmap_atomic(page);
+                        ret = compress_page(compress, s, dst);
+                        kunmap_atomic(s);
+
+                        drm_clflush_pages(&page, 1);
+
+                        if (ret)
+                                break;
+                }
+        }
 
         if (ret || compress_flush(compress, dst)) {
                 while (dst->page_count--)
@@ -1657,10 +1696,13 @@ static void capture_params(struct i915_gpu_state *error)
 static void capture_finish(struct i915_gpu_state *error)
 {
         struct i915_ggtt *ggtt = &error->i915->ggtt;
 
+        if (drm_mm_node_allocated(&ggtt->error_capture)) {
                 const u64 slot = ggtt->error_capture.start;
+
                 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+        }
 }
 
 #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
 