drm/nouveau/fb/gf100: defer DMA mapping of scratch page to oneinit() hook

The 100c10 scratch page is mapped using dma_map_page() before the TTM
layer has had a chance to set the DMA mask. This means we are still
running with the default mask of 32 bits when this code executes, which
causes problems for platforms with no memory below 4 GB (such as AMD
Seattle).

So move the dma_map_page() call to the .oneinit hook, which executes
after the DMA mask has been set.
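
To make the ordering issue concrete: a streaming mapping created while
the device still carries the default 32-bit mask must land below 4 GB,
which is impossible on a machine whose RAM all lives above that
boundary. A minimal sketch of the required ordering (generic "dev" and
"page" names, not code from this patch; for nouveau the mask widening
is done by the TTM setup):

	/* Widen the DMA mask first. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* Only mappings created after this point may use the full
	 * address range.
	 */
	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr))
		return -EFAULT;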

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit ebf7655aeb (parent 38f5359354)
Author: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Date:   2016-10-06 16:49:29 +01:00
Committed-by: Ben Skeggs <bskeggs@redhat.com>

@@ -50,24 +50,33 @@ gf100_fb_intr(struct nvkm_fb *base)
 }
 
 int
-gf100_fb_oneinit(struct nvkm_fb *fb)
+gf100_fb_oneinit(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = fb->subdev.device;
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 	int ret, size = 0x1000;
 
 	size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
 	size = min(size, 0x1000);
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
-			      false, &fb->mmu_rd);
+			      false, &fb->base.mmu_rd);
 	if (ret)
 		return ret;
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
-			      false, &fb->mmu_wr);
+			      false, &fb->base.mmu_wr);
 	if (ret)
 		return ret;
 
+	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c10_page) {
+		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c10))
+			return -EFAULT;
+	}
+
 	return 0;
 }
 
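
Since allocation and mapping now happen in the oneinit hook, the
matching teardown is unchanged: gf100_fb_dtor() already unmaps and
frees the page, along the lines of the following (paraphrased from the
surrounding code, not part of this diff):

	if (fb->r100c10_page) {
		dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(fb->r100c10_page);
	}
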
@@ -123,14 +132,6 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
 	nvkm_fb_ctor(func, device, index, &fb->base);
 	*pfb = &fb->base;
 
-	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (fb->r100c10_page) {
-		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
-					   PAGE_SIZE, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(device->dev, fb->r100c10))
-			return -EFAULT;
-	}
-
 	return 0;
 }
 
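
For reference, the consumer of this mapping is the init path:
gf100_fb_init() (untouched here) programs the bus address into the
0x100c10 scratch register, roughly as below. Because nvkm runs a
subdev's oneinit before its init, the mapping set up above exists by
the time this write happens.

	if (fb->r100c10_page)
		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);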