drm/nouveau/mmu: directly use instmem for page tables
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit d0659d3277
parent d8e83994aa
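This change moves NVKM page tables off the nvkm_gpuobj wrapper and onto raw instance memory: struct nvkm_vm_pgt now stores struct nvkm_memory *mem[2] in place of struct nvkm_gpuobj *obj[2], tables are allocated with nvkm_memory_new() against NVKM_MEM_TARGET_INST and freed with nvkm_memory_del(), and the per-backend MMU hooks take struct nvkm_memory * and read table addresses and sizes through accessors. The recurring before/after pattern, condensed from the hunks below (an excerpt, not a standalone program):

        /* before: page table wrapped in a gpuobj */
        ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
                              NVOBJ_FLAG_ZERO_ALLOC, &vpgt->obj[big]);

        /* after: zeroed instance memory, allocated directly */
        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
                              pgt_size, 0x1000, true, &vpgt->mem[big]);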
@@ -53,7 +53,7 @@ void nvkm_gpuobj_destroy(struct nvkm_gpuobj *);
 
 int nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
                     u32 align, u32 flags, struct nvkm_gpuobj **);
-int nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_gpuobj *,
+int nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_memory *,
                     struct nvkm_gpuobj **);
 int nvkm_gpuobj_map(struct nvkm_gpuobj *, u32 acc, struct nvkm_vma *);
 int nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
@@ -6,7 +6,7 @@ struct nvkm_device;
 struct nvkm_mem;
 
 struct nvkm_vm_pgt {
-        struct nvkm_gpuobj *obj[2];
+        struct nvkm_memory *mem[2];
         u32 refcount[2];
 };
 
@@ -53,13 +53,14 @@ struct nvkm_mmu {
                       struct nvkm_vm **);
 
         void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
-                        struct nvkm_gpuobj *pgt[2]);
-        void (*map)(struct nvkm_vma *, struct nvkm_gpuobj *,
+                        struct nvkm_memory *pgt[2]);
+        void (*map)(struct nvkm_vma *, struct nvkm_memory *,
                     struct nvkm_mem *, u32 pte, u32 cnt,
                     u64 phys, u64 delta);
-        void (*map_sg)(struct nvkm_vma *, struct nvkm_gpuobj *,
+        void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
                        struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
-        void (*unmap)(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt);
+        void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
+                      u32 pte, u32 cnt);
         void (*flush)(struct nvkm_vm *);
 };
 
@@ -308,7 +308,6 @@ static void
 nvkm_gpudup_dtor(struct nvkm_object *object)
 {
         struct nvkm_gpuobj *gpuobj = (void *)object;
-        nvkm_object_ref(NULL, (struct nvkm_object **)&gpuobj->parent);
         nvkm_object_destroy(&gpuobj->object);
 }
 
@@ -323,7 +322,7 @@ nvkm_gpudup_oclass = {
 };
 
 int
-nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
+nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_memory *base,
                 struct nvkm_gpuobj **pgpuobj)
 {
         struct nvkm_gpuobj *gpuobj;
@@ -335,8 +334,7 @@ nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
         if (ret)
                 return ret;
 
-        nvkm_object_ref(nv_object(base), (struct nvkm_object **)&gpuobj->parent);
-        gpuobj->addr = base->addr;
-        gpuobj->size = base->size;
+        gpuobj->addr = nvkm_memory_addr(base);
+        gpuobj->size = nvkm_memory_size(base);
         return 0;
 }
@@ -61,7 +61,7 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_object *parent,
 
         if (dmaobj->clone) {
                 struct nv04_mmu *mmu = nv04_mmu(dmaobj);
-                struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
+                struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
                 if (!dmaobj->base.start)
                         return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
                 nvkm_kmap(pgt);
@@ -161,7 +161,7 @@ gf100_bar_dtor(struct nvkm_object *object)
         nvkm_gpuobj_ref(NULL, &bar->bar[1].mem);
 
         if (bar->bar[0].vm) {
-                nvkm_gpuobj_ref(NULL, &bar->bar[0].vm->pgt[0].obj[0]);
+                nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
                 nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
         }
         nvkm_gpuobj_ref(NULL, &bar->bar[0].pgd);
@@ -207,7 +207,7 @@ nv50_bar_dtor(struct nvkm_object *object)
         nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
         nvkm_gpuobj_ref(NULL, &bar->bar3);
         if (bar->bar3_vm) {
-                nvkm_gpuobj_ref(NULL, &bar->bar3_vm->pgt[0].obj[0]);
+                nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
                 nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
         }
         nvkm_gpuobj_ref(NULL, &bar->pgd);
@@ -46,7 +46,7 @@ nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
                 u32 num = r->length >> bits;
 
                 while (num) {
-                        struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+                        struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
                         end = (pte + num);
                         if (unlikely(end >= max))
@@ -89,7 +89,7 @@ nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
         struct scatterlist *sg;
 
         for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
-                struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
                 sglen = sg_dma_len(sg) >> PAGE_SHIFT;
 
                 end = pte + sglen;
@@ -145,7 +145,7 @@ nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
         u32 end, len;
 
         while (num) {
-                struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
                 end = (pte + num);
                 if (unlikely(end >= max))
@@ -193,14 +193,14 @@ nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
         u32 end, len;
 
         while (num) {
-                struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+                struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
                 end = (pte + num);
                 if (unlikely(end >= max))
                         end = max;
                 len = end - pte;
 
-                mmu->unmap(pgt, pte, len);
+                mmu->unmap(vma, pgt, pte, len);
 
                 num -= len;
                 pte += len;
@@ -225,7 +225,7 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
         struct nvkm_mmu *mmu = vm->mmu;
         struct nvkm_vm_pgd *vpgd;
         struct nvkm_vm_pgt *vpgt;
-        struct nvkm_gpuobj *pgt;
+        struct nvkm_memory *pgt;
         u32 pde;
 
         for (pde = fpde; pde <= lpde; pde++) {
@@ -233,14 +233,14 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
                 if (--vpgt->refcount[big])
                         continue;
 
-                pgt = vpgt->obj[big];
-                vpgt->obj[big] = NULL;
+                pgt = vpgt->mem[big];
+                vpgt->mem[big] = NULL;
 
                 list_for_each_entry(vpgd, &vm->pgd_list, head) {
-                        mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+                        mmu->map_pgt(vpgd->obj, pde, vpgt->mem);
                 }
 
-                nvkm_gpuobj_ref(NULL, &pgt);
+                nvkm_memory_del(&pgt);
         }
 }
 
@@ -257,13 +257,13 @@ nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
         pgt_size  = (1 << (mmu->pgt_bits + 12)) >> type;
         pgt_size *= 8;
 
-        ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
-                              NVOBJ_FLAG_ZERO_ALLOC, &vpgt->obj[big]);
+        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+                              pgt_size, 0x1000, true, &vpgt->mem[big]);
         if (unlikely(ret))
                 return ret;
 
         list_for_each_entry(vpgd, &vm->pgd_list, head) {
-                mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+                mmu->map_pgt(vpgd->obj, pde, vpgt->mem);
         }
 
         vpgt->refcount[big]++;
@@ -342,16 +342,15 @@ int
 nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
 {
         struct nvkm_mmu *mmu = vm->mmu;
-        struct nvkm_gpuobj *pgt;
+        struct nvkm_memory *pgt;
         int ret;
 
-        ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
-                              (size >> mmu->spg_shift) * 8, 0x1000,
-                              NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+        ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+                              (size >> mmu->spg_shift) * 8, 0x1000, true, &pgt);
         if (ret == 0) {
                 vm->pgt[0].refcount[0] = 1;
-                vm->pgt[0].obj[0] = pgt;
-                nvkm_memory_boot(pgt->memory, vm);
+                vm->pgt[0].mem[0] = pgt;
+                nvkm_memory_boot(pgt, vm);
         }
 
         return ret;
@@ -422,7 +421,7 @@ nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
 
         mutex_lock(&vm->mutex);
         for (i = vm->fpde; i <= vm->lpde; i++)
-                mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+                mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
         list_add(&vpgd->head, &vm->pgd_list);
         mutex_unlock(&vm->mutex);
         return 0;
@@ -69,14 +69,14 @@ const u8 gf100_pte_storage_type_map[256] =
 
 
 static void
-gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
+gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
 {
         u32 pde[2] = { 0, 0 };
 
         if (pgt[0])
-                pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
+                pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
         if (pgt[1])
-                pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
+                pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);
 
         nvkm_kmap(pgd);
         nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
@@ -99,7 +99,7 @@ gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
 }
 
 static void
-gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
              struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
         u64 next = 1 << (vma->node->type - 8);
@@ -126,7 +126,7 @@ gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 }
 
 static void
-gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
                 struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
         u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
@@ -145,7 +145,7 @@ gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 }
 
 static void
-gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
         nvkm_kmap(pgt);
         pte <<= 3;
@@ -33,7 +33,7 @@
  ******************************************************************************/
 
 static void
-nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
                struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
         pte = 0x00008 + (pte * 4);
@@ -52,7 +52,7 @@ nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 }
 
 static void
-nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
         pte = 0x00008 + (pte * 4);
         nvkm_kmap(pgt);
@@ -88,8 +88,9 @@ nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                struct nvkm_oclass *oclass, void *data, u32 size,
                struct nvkm_object **pobject)
 {
+        struct nvkm_device *device = (void *)parent;
         struct nv04_mmu *mmu;
-        struct nvkm_gpuobj *dma;
+        struct nvkm_memory *dma;
         int ret;
 
         ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART",
@@ -113,11 +114,10 @@ nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         if (ret)
                 return ret;
 
-        ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
+        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                               (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
-                              16, NVOBJ_FLAG_ZERO_ALLOC,
-                              &mmu->vm->pgt[0].obj[0]);
-        dma = mmu->vm->pgt[0].obj[0];
+                              16, true, &dma);
+        mmu->vm->pgt[0].mem[0] = dma;
         mmu->vm->pgt[0].refcount[0] = 1;
         if (ret)
                 return ret;
@@ -134,7 +134,7 @@ nv04_mmu_dtor(struct nvkm_object *object)
 {
         struct nv04_mmu *mmu = (void *)object;
         if (mmu->vm) {
-                nvkm_gpuobj_ref(NULL, &mmu->vm->pgt[0].obj[0]);
+                nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
                 nvkm_vm_ref(NULL, &mmu->vm, NULL);
         }
         if (mmu->nullp) {
@@ -35,7 +35,7 @@
  ******************************************************************************/
 
 static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
                struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
         pte = pte * 4;
@@ -54,7 +54,7 @@ nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 }
 
 static void
-nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
         pte = pte * 4;
         nvkm_kmap(pgt);
@@ -68,7 +68,7 @@ nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 static void
 nv41_vm_flush(struct nvkm_vm *vm)
 {
-        struct nv04_mmu *mmu = (void *)vm->mmu;
+        struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
         struct nvkm_device *device = mmu->base.subdev.device;
 
         mutex_lock(&nv_subdev(mmu)->mutex);
@@ -121,10 +121,9 @@ nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         if (ret)
                 return ret;
 
-        ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
-                              (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
-                              NVOBJ_FLAG_ZERO_ALLOC,
-                              &mmu->vm->pgt[0].obj[0]);
+        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+                              (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
+                              &mmu->vm->pgt[0].mem[0]);
         mmu->vm->pgt[0].refcount[0] = 1;
         if (ret)
                 return ret;
@@ -137,14 +136,14 @@ nv41_mmu_init(struct nvkm_object *object)
 {
         struct nv04_mmu *mmu = (void *)object;
         struct nvkm_device *device = mmu->base.subdev.device;
-        struct nvkm_gpuobj *dma = mmu->vm->pgt[0].obj[0];
+        struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
         int ret;
 
         ret = nvkm_mmu_init(&mmu->base);
         if (ret)
                 return ret;
 
-        nvkm_wr32(device, 0x100800, dma->addr | 0x00000002);
+        nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
         nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
         nvkm_wr32(device, 0x100820, 0x00000000);
         return 0;
@@ -35,7 +35,7 @@
  ******************************************************************************/
 
 static void
-nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
+nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
              dma_addr_t *list, u32 pte, u32 cnt)
 {
         u32 base = (pte << 2) & ~0x0000000f;
@@ -81,10 +81,10 @@ nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
 }
 
 static void
-nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
                struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
-        struct nv04_mmu *mmu = (void *)vma->vm->mmu;
+        struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
         u32 tmp[4];
         int i;
 
@@ -114,9 +114,9 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 }
 
 static void
-nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
-        struct nv04_mmu *mmu = (void *)nvkm_mmu(pgt);
+        struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
 
         nvkm_kmap(pgt);
         if (pte & 3) {
@@ -143,7 +143,7 @@ nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 static void
 nv44_vm_flush(struct nvkm_vm *vm)
 {
-        struct nv04_mmu *mmu = (void *)vm->mmu;
+        struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
         struct nvkm_device *device = mmu->base.subdev.device;
         nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
         nvkm_wr32(device, 0x100808, 0x00000020);
@@ -200,10 +200,10 @@ nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
         if (ret)
                 return ret;
 
-        ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
+        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                               (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
-                              512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
-                              &mmu->vm->pgt[0].obj[0]);
+                              512 * 1024, true,
+                              &mmu->vm->pgt[0].mem[0]);
         mmu->vm->pgt[0].refcount[0] = 1;
         if (ret)
                 return ret;
@@ -216,7 +216,7 @@ nv44_mmu_init(struct nvkm_object *object)
 {
         struct nv04_mmu *mmu = (void *)object;
         struct nvkm_device *device = mmu->base.subdev.device;
-        struct nvkm_gpuobj *gart = mmu->vm->pgt[0].obj[0];
+        struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
         u32 addr;
         int ret;
 
@@ -229,7 +229,7 @@ nv44_mmu_init(struct nvkm_object *object)
          * of 512KiB for this to work correctly
          */
         addr  = nvkm_rd32(device, 0x10020c);
-        addr -= ((gart->addr >> 19) + 1) << 19;
+        addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
 
         nvkm_wr32(device, 0x100850, 0x80000000);
         nvkm_wr32(device, 0x100818, mmu->null);
@@ -29,18 +29,20 @@
 #include <core/gpuobj.h>
 
 static void
-nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
+nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
 {
         u64 phys = 0xdeadcafe00000000ULL;
         u32 coverage = 0;
 
         if (pgt[0]) {
-                phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
-                coverage = (pgt[0]->size >> 3) << 12;
+                /* present, 4KiB pages */
+                phys = 0x00000003 | nvkm_memory_addr(pgt[0]);
+                coverage = (nvkm_memory_size(pgt[0]) >> 3) << 12;
         } else
         if (pgt[1]) {
-                phys = 0x00000001 | pgt[1]->addr; /* present */
-                coverage = (pgt[1]->size >> 3) << 16;
+                /* present, 64KiB pages */
+                phys = 0x00000001 | nvkm_memory_addr(pgt[1]);
+                coverage = (nvkm_memory_size(pgt[1]) >> 3) << 16;
         }
 
         if (phys & 1) {
@@ -72,7 +74,7 @@ vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
 }
 
 static void
-nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
             struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
         u32 comp = (mem->memtype & 0x180) >> 7;
@@ -121,7 +123,7 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 }
 
 static void
-nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
                struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
         u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
@@ -137,7 +139,7 @@ nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 }
 
 static void
-nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv50_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
         pte <<= 3;
         nvkm_kmap(pgt);
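Across all backends the conversion follows the same shape: map()/map_sg()/unmap() now receive struct nvkm_memory *pgt, unmap() additionally gains the vma (letting nv44, for example, reach its mmu via vma->vm->mmu instead of nvkm_mmu(pgt)), and direct field reads are replaced by accessors, a pattern used throughout the hunks above:

        u64 addr = nvkm_memory_addr(pgt);    /* was: pgt->addr */
        u64 size = nvkm_memory_size(pgt);    /* was: pgt->size */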