From 57f0f512b273f60d52568b8c6b77e17f5636edc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Fabian=20Silva=20Delgado?= Date: Wed, 5 Aug 2015 17:04:01 -0300 Subject: Initial import --- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild | 6 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 480 ++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c | 237 ++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c | 151 ++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h | 19 + drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c | 157 ++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c | 247 ++++++++++++ drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c | 241 ++++++++++++ 8 files changed, 1538 insertions(+) create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c create mode 100644 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild new file mode 100644 index 000000000..012c9db68 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild @@ -0,0 +1,6 @@ +nvkm-y += nvkm/subdev/mmu/base.o +nvkm-y += nvkm/subdev/mmu/nv04.o +nvkm-y += nvkm/subdev/mmu/nv41.o +nvkm-y += nvkm/subdev/mmu/nv44.o +nvkm-y += nvkm/subdev/mmu/nv50.o +nvkm-y += nvkm/subdev/mmu/gf100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c new file mode 100644 index 000000000..277b6ec04 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c @@ -0,0 +1,480 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: Ben Skeggs + */ +#include <subdev/mmu.h> +#include <subdev/fb.h> + +#include <core/gpuobj.h> + +void +nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) +{ + struct nvkm_vm *vm = vma->vm; + struct nvkm_mmu *mmu = vm->mmu; + struct nvkm_mm_node *r; + int big = vma->node->type != mmu->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; + u32 max = 1 << (mmu->pgt_bits - bits); + u32 end, len; + + delta = 0; + list_for_each_entry(r, &node->regions, rl_entry) { + u64 phys = (u64)r->offset << 12; + u32 num = r->length >> bits; + + while (num) { + struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + mmu->map(vma, pgt, node, pte, len, phys, delta); + + num -= len; + pte += len; + if (unlikely(end >= max)) { + phys += len << (bits + 12); + pde++; + pte = 0; + } + + delta += (u64)len << vma->node->type; + } + } + + mmu->flush(vm); +} + +static void +nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length, + struct nvkm_mem *mem) +{ + struct nvkm_vm *vm = vma->vm; + struct nvkm_mmu *mmu = vm->mmu; + int big = vma->node->type != mmu->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; + u32 max = 1 << (mmu->pgt_bits - bits); + unsigned m, sglen; + u32 end, len; + int i; + struct scatterlist *sg; + + for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) { + struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; + sglen = sg_dma_len(sg) >> PAGE_SHIFT; + + end = pte + sglen; + if (unlikely(end >= max)) + end = max; + len = end - pte; + + for (m = 0; m < len; m++) { + dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); + + mmu->map_sg(vma, pgt, mem, pte, 1, &addr); + num--; + pte++; + + if (num == 0) + goto finish; + } + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + if (m < sglen) { + for (; m < sglen; m++) { + dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); + + mmu->map_sg(vma, pgt, mem, pte, 1, &addr); + num--; + pte++; + if (num == 0) + goto finish; + } + } + + } +finish: + mmu->flush(vm); +} + +static void +nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length, + struct nvkm_mem *mem) +{ + struct nvkm_vm *vm = vma->vm; + struct nvkm_mmu *mmu = vm->mmu; + dma_addr_t *list = mem->pages; + int big = vma->node->type != mmu->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; + u32 max = 1 << (mmu->pgt_bits - bits); + u32 end, len; + + while (num) { + struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + mmu->map_sg(vma, pgt, mem, pte, len, list); + + num -= len; + pte += len; + list += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + + mmu->flush(vm); +} + +void +nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node) +{ + if (node->sg) + nvkm_vm_map_sg_table(vma, 0, node->size << 12, node); + else + if (node->pages) + nvkm_vm_map_sg(vma, 0, node->size << 12, node); + else + nvkm_vm_map_at(vma, 0, node); +} + +void +nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length) +{ + 
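/* walk the page tables and clear every PTE covering [delta, delta + length) of this VMA, then flush the TLB */ + 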
struct nvkm_vm *vm = vma->vm; + struct nvkm_mmu *mmu = vm->mmu; + int big = vma->node->type != mmu->spg_shift; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> mmu->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits; + u32 max = 1 << (mmu->pgt_bits - bits); + u32 end, len; + + while (num) { + struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big]; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + mmu->unmap(pgt, pte, len); + + num -= len; + pte += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + + mmu->flush(vm); +} + +void +nvkm_vm_unmap(struct nvkm_vma *vma) +{ + nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12); +} + +static void +nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde) +{ + struct nvkm_mmu *mmu = vm->mmu; + struct nvkm_vm_pgd *vpgd; + struct nvkm_vm_pgt *vpgt; + struct nvkm_gpuobj *pgt; + u32 pde; + + for (pde = fpde; pde <= lpde; pde++) { + vpgt = &vm->pgt[pde - vm->fpde]; + if (--vpgt->refcount[big]) + continue; + + pgt = vpgt->obj[big]; + vpgt->obj[big] = NULL; + + list_for_each_entry(vpgd, &vm->pgd_list, head) { + mmu->map_pgt(vpgd->obj, pde, vpgt->obj); + } + + mutex_unlock(&nv_subdev(mmu)->mutex); + nvkm_gpuobj_ref(NULL, &pgt); + mutex_lock(&nv_subdev(mmu)->mutex); + } +} + +static int +nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type) +{ + struct nvkm_mmu *mmu = vm->mmu; + struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + struct nvkm_vm_pgd *vpgd; + struct nvkm_gpuobj *pgt; + int big = (type != mmu->spg_shift); + u32 pgt_size; + int ret; + + pgt_size = (1 << (mmu->pgt_bits + 12)) >> type; + pgt_size *= 8; + + mutex_unlock(&nv_subdev(mmu)->mutex); + ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &pgt); + mutex_lock(&nv_subdev(mmu)->mutex); + if (unlikely(ret)) + return ret; + + /* someone beat us to filling the PDE while we didn't have the lock */ + if (unlikely(vpgt->refcount[big]++)) { + mutex_unlock(&nv_subdev(mmu)->mutex); + nvkm_gpuobj_ref(NULL, &pgt); + mutex_lock(&nv_subdev(mmu)->mutex); + return 0; + } + + vpgt->obj[big] = pgt; + list_for_each_entry(vpgd, &vm->pgd_list, head) { + mmu->map_pgt(vpgd->obj, pde, vpgt->obj); + } + + return 0; +} + +int +nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access, + struct nvkm_vma *vma) +{ + struct nvkm_mmu *mmu = vm->mmu; + u32 align = (1 << page_shift) >> 12; + u32 msize = size >> 12; + u32 fpde, lpde, pde; + int ret; + + mutex_lock(&nv_subdev(mmu)->mutex); + ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align, + &vma->node); + if (unlikely(ret != 0)) { + mutex_unlock(&nv_subdev(mmu)->mutex); + return ret; + } + + fpde = (vma->node->offset >> mmu->pgt_bits); + lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits; + + for (pde = fpde; pde <= lpde; pde++) { + struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + int big = (vma->node->type != mmu->spg_shift); + + if (likely(vpgt->refcount[big])) { + vpgt->refcount[big]++; + continue; + } + + ret = nvkm_vm_map_pgt(vm, pde, vma->node->type); + if (ret) { + if (pde != fpde) + nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1); + nvkm_mm_free(&vm->mm, &vma->node); + mutex_unlock(&nv_subdev(mmu)->mutex); + return ret; + } + } + mutex_unlock(&nv_subdev(mmu)->mutex); + + vma->vm = NULL; + nvkm_vm_ref(vm, &vma->vm, NULL); + vma->offset = (u64)vma->node->offset << 12; + vma->access = access; + return 0; +} 
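+ +/* undo nvkm_vm_get(): drop the page-table refcounts for this range, free the address-space node and release the VM reference */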
+ +void +nvkm_vm_put(struct nvkm_vma *vma) +{ + struct nvkm_vm *vm = vma->vm; + struct nvkm_mmu *mmu = vm->mmu; + u32 fpde, lpde; + + if (unlikely(vma->node == NULL)) + return; + fpde = (vma->node->offset >> mmu->pgt_bits); + lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits; + + mutex_lock(&nv_subdev(mmu)->mutex); + nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde); + nvkm_mm_free(&vm->mm, &vma->node); + mutex_unlock(&nv_subdev(mmu)->mutex); + + nvkm_vm_ref(NULL, &vma->vm, NULL); +} + +int +nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, + u32 block, struct nvkm_vm **pvm) +{ + struct nvkm_vm *vm; + u64 mm_length = (offset + length) - mm_offset; + int ret; + + vm = kzalloc(sizeof(*vm), GFP_KERNEL); + if (!vm) + return -ENOMEM; + + INIT_LIST_HEAD(&vm->pgd_list); + vm->mmu = mmu; + kref_init(&vm->refcount); + vm->fpde = offset >> (mmu->pgt_bits + 12); + vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12); + + vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt)); + if (!vm->pgt) { + kfree(vm); + return -ENOMEM; + } + + ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, + block >> 12); + if (ret) { + vfree(vm->pgt); + kfree(vm); + return ret; + } + + *pvm = vm; + + return 0; +} + +int +nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset, + struct nvkm_vm **pvm) +{ + struct nvkm_mmu *mmu = nvkm_mmu(device); + return mmu->create(mmu, offset, length, mm_offset, pvm); +} + +static int +nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd) +{ + struct nvkm_mmu *mmu = vm->mmu; + struct nvkm_vm_pgd *vpgd; + int i; + + if (!pgd) + return 0; + + vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL); + if (!vpgd) + return -ENOMEM; + + nvkm_gpuobj_ref(pgd, &vpgd->obj); + + mutex_lock(&nv_subdev(mmu)->mutex); + for (i = vm->fpde; i <= vm->lpde; i++) + mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); + list_add(&vpgd->head, &vm->pgd_list); + mutex_unlock(&nv_subdev(mmu)->mutex); + return 0; +} + +static void +nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd) +{ + struct nvkm_mmu *mmu = vm->mmu; + struct nvkm_vm_pgd *vpgd, *tmp; + struct nvkm_gpuobj *pgd = NULL; + + if (!mpgd) + return; + + mutex_lock(&nv_subdev(mmu)->mutex); + list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { + if (vpgd->obj == mpgd) { + pgd = vpgd->obj; + list_del(&vpgd->head); + kfree(vpgd); + break; + } + } + mutex_unlock(&nv_subdev(mmu)->mutex); + + nvkm_gpuobj_ref(NULL, &pgd); +} + +static void +nvkm_vm_del(struct kref *kref) +{ + struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount); + struct nvkm_vm_pgd *vpgd, *tmp; + + list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { + nvkm_vm_unlink(vm, vpgd->obj); + } + + nvkm_mm_fini(&vm->mm); + vfree(vm->pgt); + kfree(vm); +} + +int +nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd) +{ + if (ref) { + int ret = nvkm_vm_link(ref, pgd); + if (ret) + return ret; + + kref_get(&ref->refcount); + } + + if (*ptr) { + nvkm_vm_unlink(*ptr, pgd); + kref_put(&(*ptr)->refcount, nvkm_vm_del); + } + + *ptr = ref; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c new file mode 100644 index 000000000..294cda37f --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c @@ -0,0 +1,237 @@ +/* + * Copyright 2010 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include <subdev/mmu.h> +#include <subdev/bar.h> +#include <subdev/fb.h> +#include <subdev/ltc.h> +#include <subdev/timer.h> + +#include <core/gpuobj.h> + +struct gf100_mmu_priv { + struct nvkm_mmu base; +}; + + +/* Map from compressed to corresponding uncompressed storage type. + * The value 0xff represents an invalid storage type. + */ +const u8 gf100_pte_storage_type_map[256] = +{ + 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */ + 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */ + 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */ + 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */ + 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27, + 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */ + 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */ + 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */ + 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7, + 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */ + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3, + 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */ + 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, + 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */ + 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */ + 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff +}; + + +static void +gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2]) +{ + u32 pde[2] = { 0, 0 }; + + if (pgt[0]) + pde[1] = 0x00000001 | (pgt[0]->addr >> 8); + if (pgt[1]) + pde[0] = 0x00000001 | (pgt[1]->addr >> 8); + + nv_wo32(pgd, (index * 8) + 0, pde[0]); + nv_wo32(pgd, (index * 8) + 4, pde[1]); +} + 
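+/* pack a GF100 PTE: address >> 8 plus the present bit, a system-coherence bit for SYS mappings, the target aperture at bit 32 and the storage type at bit 36 */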
+static inline u64 +gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target) +{ + phys >>= 8; + + phys |= 0x00000001; /* present */ + if (vma->access & NV_MEM_ACCESS_SYS) + phys |= 0x00000002; + + phys |= ((u64)target << 32); + phys |= ((u64)memtype << 36); + return phys; +} + +static void +gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, + struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) +{ + u64 next = 1 << (vma->node->type - 8); + + phys = gf100_vm_addr(vma, phys, mem->memtype, 0); + pte <<= 3; + + if (mem->tag) { + struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu); + u32 tag = mem->tag->offset + (delta >> 17); + phys |= (u64)tag << (32 + 12); + next |= (u64)1 << (32 + 12); + ltc->tags_clear(ltc, tag, cnt); + } + + while (cnt--) { + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + phys += next; + pte += 8; + } +} + +static void +gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, + struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5; + /* compressed storage types are invalid for system memory */ + u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff]; + + pte <<= 3; + while (cnt--) { + u64 phys = gf100_vm_addr(vma, *list++, memtype, target); + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + pte += 8; + } +} + +static void +gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte <<= 3; + while (cnt--) { + nv_wo32(pgt, pte + 0, 0x00000000); + nv_wo32(pgt, pte + 4, 0x00000000); + pte += 8; + } +} + +static void +gf100_vm_flush(struct nvkm_vm *vm) +{ + struct gf100_mmu_priv *priv = (void *)vm->mmu; + struct nvkm_bar *bar = nvkm_bar(priv); + struct nvkm_vm_pgd *vpgd; + u32 type; + + bar->flush(bar); + + type = 0x00000001; /* PAGE_ALL */ + if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR])) + type |= 0x00000004; /* HUB_ONLY */ + + mutex_lock(&nv_subdev(priv)->mutex); + list_for_each_entry(vpgd, &vm->pgd_list, head) { + /* looks like maybe a "free flush slots" counter, the + * faster you write to 0x100cbc the more it decreases + */ + if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) { + nv_error(priv, "vm timeout 0: 0x%08x %d\n", + nv_rd32(priv, 0x100c80), type); + } + + nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8); + nv_wr32(priv, 0x100cbc, 0x80000000 | type); + + /* wait for flush to be queued? 
*/ + if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) { + nv_error(priv, "vm timeout 1: 0x%08x %d\n", + nv_rd32(priv, 0x100c80), type); + } + } + mutex_unlock(&nv_subdev(priv)->mutex); +} + +static int +gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset, + struct nvkm_vm **pvm) +{ + return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm); +} + +static int +gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, + struct nvkm_oclass *oclass, void *data, u32 size, + struct nvkm_object **pobject) +{ + struct gf100_mmu_priv *priv; + int ret; + + ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.limit = 1ULL << 40; + priv->base.dma_bits = 40; + priv->base.pgt_bits = 27 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 17; + priv->base.create = gf100_vm_create; + priv->base.map_pgt = gf100_vm_map_pgt; + priv->base.map = gf100_vm_map; + priv->base.map_sg = gf100_vm_map_sg; + priv->base.unmap = gf100_vm_unmap; + priv->base.flush = gf100_vm_flush; + return 0; +} + +struct nvkm_oclass +gf100_mmu_oclass = { + .handle = NV_SUBDEV(MMU, 0xc0), + .ofuncs = &(struct nvkm_ofuncs) { + .ctor = gf100_mmu_ctor, + .dtor = _nvkm_mmu_dtor, + .init = _nvkm_mmu_init, + .fini = _nvkm_mmu_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c new file mode 100644 index 000000000..fe93ea271 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c @@ -0,0 +1,151 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include "nv04.h" + +#include <core/device.h> +#include <core/gpuobj.h> + +#define NV04_PDMA_SIZE (128 * 1024 * 1024) +#define NV04_PDMA_PAGE ( 4 * 1024) + +/******************************************************************************* + * VM map/unmap callbacks + ******************************************************************************/ + +static void +nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, + struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + pte = 0x00008 + (pte * 4); + while (cnt) { + u32 page = PAGE_SIZE / NV04_PDMA_PAGE; + u32 phys = (u32)*list++; + while (cnt && page--) { + nv_wo32(pgt, pte, phys | 3); + phys += NV04_PDMA_PAGE; + pte += 4; + cnt -= 1; + } + } +} + +static void +nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte = 0x00008 + (pte * 4); + while (cnt--) { + nv_wo32(pgt, pte, 0x00000000); + pte += 4; + } +} + +static void +nv04_vm_flush(struct nvkm_vm *vm) +{ +} + +/******************************************************************************* + * VM object + ******************************************************************************/ + +int +nv04_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mmstart, + struct nvkm_vm **pvm) +{ + return -EINVAL; +} + +/******************************************************************************* + * MMU subdev + ******************************************************************************/ + +static int +nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, + struct nvkm_oclass *oclass, void *data, u32 size, + struct nvkm_object **pobject) +{ + struct nv04_mmu_priv *priv; + struct nvkm_gpuobj *dma; + int ret; + + ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART", + "pcigart", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.create = nv04_vm_create; + priv->base.limit = NV04_PDMA_SIZE; + priv->base.dma_bits = 32; + priv->base.pgt_bits = 32 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 12; + priv->base.map_sg = nv04_vm_map_sg; + priv->base.unmap = nv04_vm_unmap; + priv->base.flush = nv04_vm_flush; + + ret = nvkm_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096, + &priv->vm); + if (ret) + return ret; + + ret = nvkm_gpuobj_new(nv_object(priv), NULL, + (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8, + 16, NVOBJ_FLAG_ZERO_ALLOC, + &priv->vm->pgt[0].obj[0]); + dma = priv->vm->pgt[0].obj[0]; + priv->vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */ + nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1); + return 0; +} + +void +nv04_mmu_dtor(struct nvkm_object *object) +{ + struct nv04_mmu_priv *priv = (void *)object; + if (priv->vm) { + nvkm_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]); + nvkm_vm_ref(NULL, &priv->vm, NULL); + } + if (priv->nullp) { + pci_free_consistent(nv_device(priv)->pdev, 16 * 1024, + priv->nullp, priv->null); + } + nvkm_mmu_destroy(&priv->base); +} + +struct nvkm_oclass +nv04_mmu_oclass = { + .handle = NV_SUBDEV(MMU, 0x04), + .ofuncs = &(struct nvkm_ofuncs) { + .ctor = nv04_mmu_ctor, + .dtor = nv04_mmu_dtor, + .init = _nvkm_mmu_init, + .fini = _nvkm_mmu_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h new file mode 100644 index 000000000..7bf6f4b38 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h @@ -0,0 +1,19 @@ +#ifndef __NV04_MMU_PRIV__ +#define __NV04_MMU_PRIV__ + +#include <subdev/mmu.h> + 
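+/* state shared by the nv04/nv41/nv44 GART backends: the single global GART VM, plus a scratch page used to back unmapped PTEs */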
+struct nv04_mmu_priv { + struct nvkm_mmu base; + struct nvkm_vm *vm; + dma_addr_t null; + void *nullp; +}; + +static inline struct nv04_mmu_priv * +nv04_mmu(void *obj) +{ + return (void *)nvkm_mmu(obj); +} + +#endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c new file mode 100644 index 000000000..61ee3ab11 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c @@ -0,0 +1,157 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include "nv04.h" + +#include <core/device.h> +#include <core/gpuobj.h> +#include <core/option.h> +#include <subdev/timer.h> + +#define NV41_GART_SIZE (512 * 1024 * 1024) +#define NV41_GART_PAGE ( 4 * 1024) + +/******************************************************************************* + * VM map/unmap callbacks + ******************************************************************************/ + +static void +nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, + struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + pte = pte * 4; + while (cnt) { + u32 page = PAGE_SIZE / NV41_GART_PAGE; + u64 phys = (u64)*list++; + while (cnt && page--) { + nv_wo32(pgt, pte, (phys >> 7) | 1); + phys += NV41_GART_PAGE; + pte += 4; + cnt -= 1; + } + } +} + +static void +nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte = pte * 4; + while (cnt--) { + nv_wo32(pgt, pte, 0x00000000); + pte += 4; + } +} + +static void +nv41_vm_flush(struct nvkm_vm *vm) +{ + struct nv04_mmu_priv *priv = (void *)vm->mmu; + + mutex_lock(&nv_subdev(priv)->mutex); + nv_wr32(priv, 0x100810, 0x00000022); + if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) { + nv_warn(priv, "flush timeout, 0x%08x\n", + nv_rd32(priv, 0x100810)); + } + nv_wr32(priv, 0x100810, 0x00000000); + mutex_unlock(&nv_subdev(priv)->mutex); +} + +/******************************************************************************* + * MMU subdev + ******************************************************************************/ + 
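+/* boards with AGP in use, or with NvPCIE=0 set, fall back to the nv04 PCI GART backend */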
+static int +nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, + struct nvkm_oclass *oclass, void *data, u32 size, + struct nvkm_object **pobject) +{ + struct nvkm_device *device = nv_device(parent); + struct nv04_mmu_priv *priv; + int ret; + + if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || + !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) { + return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass, + data, size, pobject); + } + + ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART", + "pciegart", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.create = nv04_vm_create; + priv->base.limit = NV41_GART_SIZE; + priv->base.dma_bits = 39; + priv->base.pgt_bits = 32 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 12; + priv->base.map_sg = nv41_vm_map_sg; + priv->base.unmap = nv41_vm_unmap; + priv->base.flush = nv41_vm_flush; + + ret = nvkm_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096, + &priv->vm); + if (ret) + return ret; + + ret = nvkm_gpuobj_new(nv_object(priv), NULL, + (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, + NVOBJ_FLAG_ZERO_ALLOC, + &priv->vm->pgt[0].obj[0]); + priv->vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + return 0; +} + +static int +nv41_mmu_init(struct nvkm_object *object) +{ + struct nv04_mmu_priv *priv = (void *)object; + struct nvkm_gpuobj *dma = priv->vm->pgt[0].obj[0]; + int ret; + + ret = nvkm_mmu_init(&priv->base); + if (ret) + return ret; + + nv_wr32(priv, 0x100800, dma->addr | 0x00000002); + nv_mask(priv, 0x10008c, 0x00000100, 0x00000100); + nv_wr32(priv, 0x100820, 0x00000000); + return 0; +} + +struct nvkm_oclass +nv41_mmu_oclass = { + .handle = NV_SUBDEV(MMU, 0x41), + .ofuncs = &(struct nvkm_ofuncs) { + .ctor = nv41_mmu_ctor, + .dtor = nv04_mmu_dtor, + .init = nv41_mmu_init, + .fini = _nvkm_mmu_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c new file mode 100644 index 000000000..b90ded188 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c @@ -0,0 +1,247 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include "nv04.h" + +#include <core/device.h> +#include <core/gpuobj.h> +#include <core/option.h> +#include <subdev/timer.h> + +#define NV44_GART_SIZE (512 * 1024 * 1024) +#define NV44_GART_PAGE ( 4 * 1024) + +/******************************************************************************* + * VM map/unmap callbacks + ******************************************************************************/ + +static void +nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null, + dma_addr_t *list, u32 pte, u32 cnt) +{ + u32 base = (pte << 2) & ~0x0000000f; + u32 tmp[4]; + + tmp[0] = nv_ro32(pgt, base + 0x0); + tmp[1] = nv_ro32(pgt, base + 0x4); + tmp[2] = nv_ro32(pgt, base + 0x8); + tmp[3] = nv_ro32(pgt, base + 0xc); + + while (cnt--) { + u32 addr = list ? 
(*list++ >> 12) : (null >> 12); + switch (pte++ & 0x3) { + case 0: + tmp[0] &= ~0x07ffffff; + tmp[0] |= addr; + break; + case 1: + tmp[0] &= ~0xf8000000; + tmp[0] |= addr << 27; + tmp[1] &= ~0x003fffff; + tmp[1] |= addr >> 5; + break; + case 2: + tmp[1] &= ~0xffc00000; + tmp[1] |= addr << 22; + tmp[2] &= ~0x0001ffff; + tmp[2] |= addr >> 10; + break; + case 3: + tmp[2] &= ~0xfffe0000; + tmp[2] |= addr << 17; + tmp[3] &= ~0x00000fff; + tmp[3] |= addr >> 15; + break; + } + } + + nv_wo32(pgt, base + 0x0, tmp[0]); + nv_wo32(pgt, base + 0x4, tmp[1]); + nv_wo32(pgt, base + 0x8, tmp[2]); + nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000); +} + +static void +nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, + struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + struct nv04_mmu_priv *priv = (void *)vma->vm->mmu; + u32 tmp[4]; + int i; + + if (pte & 3) { + u32 max = 4 - (pte & 3); + u32 part = (cnt > max) ? max : cnt; + nv44_vm_fill(pgt, priv->null, list, pte, part); + pte += part; + list += part; + cnt -= part; + } + + while (cnt >= 4) { + for (i = 0; i < 4; i++) + tmp[i] = *list++ >> 12; + nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27); + nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22); + nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17); + nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000); + cnt -= 4; + } + + if (cnt) + nv44_vm_fill(pgt, priv->null, list, pte, cnt); +} + +static void +nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt) +{ + struct nv04_mmu_priv *priv = (void *)nvkm_mmu(pgt); + + if (pte & 3) { + u32 max = 4 - (pte & 3); + u32 part = (cnt > max) ? max : cnt; + nv44_vm_fill(pgt, priv->null, NULL, pte, part); + pte += part; + cnt -= part; + } + + while (cnt >= 4) { + nv_wo32(pgt, pte++ * 4, 0x00000000); + nv_wo32(pgt, pte++ * 4, 0x00000000); + nv_wo32(pgt, pte++ * 4, 0x00000000); + nv_wo32(pgt, pte++ * 4, 0x00000000); + cnt -= 4; + } + + if (cnt) + nv44_vm_fill(pgt, priv->null, NULL, pte, cnt); +} + +static void +nv44_vm_flush(struct nvkm_vm *vm) +{ + struct nv04_mmu_priv *priv = (void *)vm->mmu; + nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE); + nv_wr32(priv, 0x100808, 0x00000020); + if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001)) + nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808)); + nv_wr32(priv, 0x100808, 0x00000000); +} + +/******************************************************************************* + * MMU subdev + ******************************************************************************/ + +static int +nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, + struct nvkm_oclass *oclass, void *data, u32 size, + struct nvkm_object **pobject) +{ + struct nvkm_device *device = nv_device(parent); + struct nv04_mmu_priv *priv; + int ret; + + if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) || + !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) { + return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass, + data, size, pobject); + } + + ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART", + "pciegart", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.create = nv04_vm_create; + priv->base.limit = NV44_GART_SIZE; + priv->base.dma_bits = 39; + priv->base.pgt_bits = 32 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 12; + priv->base.map_sg = nv44_vm_map_sg; + priv->base.unmap = nv44_vm_unmap; + priv->base.flush = nv44_vm_flush; + + priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null); + if (!priv->nullp) { + nv_error(priv, "unable to 
allocate dummy pages\n"); + return -ENOMEM; + } + + ret = nvkm_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096, + &priv->vm); + if (ret) + return ret; + + ret = nvkm_gpuobj_new(nv_object(priv), NULL, + (NV44_GART_SIZE / NV44_GART_PAGE) * 4, + 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC, + &priv->vm->pgt[0].obj[0]); + priv->vm->pgt[0].refcount[0] = 1; + if (ret) + return ret; + + return 0; +} + +static int +nv44_mmu_init(struct nvkm_object *object) +{ + struct nv04_mmu_priv *priv = (void *)object; + struct nvkm_gpuobj *gart = priv->vm->pgt[0].obj[0]; + u32 addr; + int ret; + + ret = nvkm_mmu_init(&priv->base); + if (ret) + return ret; + + /* calculate vram address of this PRAMIN block, object must be + * allocated on 512KiB alignment, and not exceed a total size + * of 512KiB for this to work correctly + */ + addr = nv_rd32(priv, 0x10020c); + addr -= ((gart->addr >> 19) + 1) << 19; + + nv_wr32(priv, 0x100850, 0x80000000); + nv_wr32(priv, 0x100818, priv->null); + nv_wr32(priv, 0x100804, NV44_GART_SIZE); + nv_wr32(priv, 0x100850, 0x00008000); + nv_mask(priv, 0x10008c, 0x00000200, 0x00000200); + nv_wr32(priv, 0x100820, 0x00000000); + nv_wr32(priv, 0x10082c, 0x00000001); + nv_wr32(priv, 0x100800, addr | 0x00000010); + return 0; +} + +struct nvkm_oclass +nv44_mmu_oclass = { + .handle = NV_SUBDEV(MMU, 0x44), + .ofuncs = &(struct nvkm_ofuncs) { + .ctor = nv44_mmu_ctor, + .dtor = nv04_mmu_dtor, + .init = nv44_mmu_init, + .fini = _nvkm_mmu_fini, + }, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c new file mode 100644 index 000000000..b83550fa7 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c @@ -0,0 +1,241 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include <subdev/mmu.h> +#include <subdev/bar.h> +#include <subdev/fb.h> +#include <subdev/timer.h> + +#include <core/engine.h> +#include <core/gpuobj.h> + +struct nv50_mmu_priv { + struct nvkm_mmu base; +}; + +static void +nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2]) +{ + u64 phys = 0xdeadcafe00000000ULL; + u32 coverage = 0; + + if (pgt[0]) { + phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */ + coverage = (pgt[0]->size >> 3) << 12; + } else + if (pgt[1]) { + phys = 0x00000001 | pgt[1]->addr; /* present */ + coverage = (pgt[1]->size >> 3) << 16; + } + + if (phys & 1) { + if (coverage <= 32 * 1024 * 1024) + phys |= 0x60; + else if (coverage <= 64 * 1024 * 1024) + phys |= 0x40; + else if (coverage <= 128 * 1024 * 1024) + phys |= 0x20; + } + + nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys)); + nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys)); +} + +static inline u64 +vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target) +{ + phys |= 1; /* present */ + phys |= (u64)memtype << 40; + phys |= target << 4; + if (vma->access & NV_MEM_ACCESS_SYS) + phys |= (1 << 6); + if (!(vma->access & NV_MEM_ACCESS_WO)) + phys |= (1 << 3); + return phys; +} + +static void +nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, + struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta) +{ + u32 comp = (mem->memtype & 0x180) >> 7; + u32 block, target; + int i; + + /* IGPs don't have real VRAM, re-target to stolen system memory */ + target = 0; + if (nvkm_fb(vma->vm->mmu)->ram->stolen) { + phys += nvkm_fb(vma->vm->mmu)->ram->stolen; + target = 3; + } + + phys = vm_addr(vma, phys, mem->memtype, target); + pte <<= 3; + cnt <<= 3; + + while (cnt) { + u32 offset_h = upper_32_bits(phys); + u32 offset_l = lower_32_bits(phys); + + for (i = 7; i >= 0; i--) { + block = 1 << (i + 3); + if (cnt >= block && !(pte & (block - 1))) + break; + } + offset_l |= (i << 7); + + phys += block << (vma->node->type - 3); + cnt -= block; + if (comp) { + u32 tag = mem->tag->offset + ((delta >> 16) * comp); + offset_h |= (tag << 17); + delta += block << (vma->node->type - 3); + } + + while (block) { + nv_wo32(pgt, pte + 0, offset_l); + nv_wo32(pgt, pte + 4, offset_h); + pte += 8; + block -= 8; + } + } +} + +static void +nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, + struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list) +{ + u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2; + pte <<= 3; + while (cnt--) { + u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target); + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + pte += 8; + } +} + +static void +nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte <<= 3; + while (cnt--) { + nv_wo32(pgt, pte + 0, 0x00000000); + nv_wo32(pgt, pte + 4, 0x00000000); + pte += 8; + } +} + 
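+/* flush the TLB of every engine that currently holds a reference to this VM */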
+static void +nv50_vm_flush(struct nvkm_vm *vm) +{ + struct nv50_mmu_priv *priv = (void *)vm->mmu; + struct nvkm_bar *bar = nvkm_bar(priv); + struct nvkm_engine *engine; + int i, vme; + + bar->flush(bar); + + mutex_lock(&nv_subdev(priv)->mutex); + for (i = 0; i < NVDEV_SUBDEV_NR; i++) { + if (!atomic_read(&vm->engref[i])) + continue; + + /* unfortunate hw bug workaround... */ + engine = nvkm_engine(priv, i); + if (engine && engine->tlb_flush) { + engine->tlb_flush(engine); + continue; + } + + switch (i) { + case NVDEV_ENGINE_GR : vme = 0x00; break; + case NVDEV_ENGINE_VP : + case NVDEV_ENGINE_MSPDEC: vme = 0x01; break; + case NVDEV_SUBDEV_BAR : vme = 0x06; break; + case NVDEV_ENGINE_MSPPP : + case NVDEV_ENGINE_MPEG : vme = 0x08; break; + case NVDEV_ENGINE_BSP : + case NVDEV_ENGINE_MSVLD : vme = 0x09; break; + case NVDEV_ENGINE_CIPHER: + case NVDEV_ENGINE_SEC : vme = 0x0a; break; + case NVDEV_ENGINE_CE0 : vme = 0x0d; break; + default: + continue; + } + + nv_wr32(priv, 0x100c80, (vme << 16) | 1); + if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) + nv_error(priv, "vm flush timeout: engine %d\n", vme); + } + mutex_unlock(&nv_subdev(priv)->mutex); +} + +static int +nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, + u64 mm_offset, struct nvkm_vm **pvm) +{ + u32 block = (1 << (mmu->pgt_bits + 12)); + if (block > length) + block = length; + + return nvkm_vm_create(mmu, offset, length, mm_offset, block, pvm); +} + +static int +nv50_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine, + struct nvkm_oclass *oclass, void *data, u32 size, + struct nvkm_object **pobject) +{ + struct nv50_mmu_priv *priv; + int ret; + + ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv); + *pobject = nv_object(priv); + if (ret) + return ret; + + priv->base.limit = 1ULL << 40; + priv->base.dma_bits = 40; + priv->base.pgt_bits = 29 - 12; + priv->base.spg_shift = 12; + priv->base.lpg_shift = 16; + priv->base.create = nv50_vm_create; + priv->base.map_pgt = nv50_vm_map_pgt; + priv->base.map = nv50_vm_map; + priv->base.map_sg = nv50_vm_map_sg; + priv->base.unmap = nv50_vm_unmap; + priv->base.flush = nv50_vm_flush; + return 0; +} + +struct nvkm_oclass +nv50_mmu_oclass = { + .handle = NV_SUBDEV(MMU, 0x50), + .ofuncs = &(struct nvkm_ofuncs) { + .ctor = nv50_mmu_ctor, + .dtor = _nvkm_mmu_dtor, + .init = _nvkm_mmu_init, + .fini = _nvkm_mmu_fini, + }, +};