iommu/tegra-smmu: Fix unmap() method

The Tegra SMMU unmap path has several problems:
1. as_pte_put() can perform a write-after-free
2. tegra_smmu_unmap() can perform cache maintenance on a page we have
   just freed.
3. when a page table is unmapped, there is no CPU cache maintenance of
   the write clearing the page directory entry, nor is there any
   maintenance of the IOMMU to ensure that it sees the page table has
   gone.

Fix this by getting rid of as_pte_put(), and instead coding the PTE
unmap separately from the PDE unmap, placing the PDE unmap after the
PTE unmap has been completed.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Thierry Reding <treding@nvidia.com>
This commit is contained in:
Russell King 2015-07-27 13:29:05 +01:00 committed by Thierry Reding
parent 9113785c3e
commit b98e34f0c6

View File

@@ -509,29 +509,35 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
return &pt[pte]; return &pt[pte];
} }
static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova) static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{ {
struct tegra_smmu *smmu = as->smmu;
u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff; u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
u32 *count = page_address(as->count); u32 *count = page_address(as->count);
u32 *pd = page_address(as->pd), *pt; u32 *pd = page_address(as->pd);
struct page *page; struct page *page;
page = pfn_to_page(pd[pde] & as->smmu->pfn_mask); page = pfn_to_page(pd[pde] & smmu->pfn_mask);
pt = page_address(page);
/* /*
* When no entries in this page table are used anymore, return the * When no entries in this page table are used anymore, return the
* memory page to the system. * memory page to the system.
*/ */
if (pt[pte] != 0) { if (--count[pde] == 0) {
if (--count[pde] == 0) { unsigned int offset = pde * sizeof(*pd);
ClearPageReserved(page);
__free_page(page);
pd[pde] = 0;
}
pt[pte] = 0; /* Clear the page directory entry first */
pd[pde] = 0;
/* Flush the page directory entry */
smmu->soc->ops->flush_dcache(as->pd, offset, sizeof(*pd));
smmu_flush_ptc(smmu, as->pd, offset);
smmu_flush_tlb_section(smmu, as->id, iova);
smmu_flush(smmu);
/* Finally, free the page */
ClearPageReserved(page);
__free_page(page);
} }
} }
@@ -569,17 +575,20 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
u32 *pte; u32 *pte;
pte = as_get_pte(as, iova, &page); pte = as_get_pte(as, iova, &page);
if (!pte) if (!pte || !*pte)
return 0; return 0;
*pte = 0;
offset = offset_in_page(pte); offset = offset_in_page(pte);
as_put_pte(as, iova);
smmu->soc->ops->flush_dcache(page, offset, 4); smmu->soc->ops->flush_dcache(page, offset, 4);
smmu_flush_ptc(smmu, page, offset); smmu_flush_ptc(smmu, page, offset);
smmu_flush_tlb_group(smmu, as->id, iova); smmu_flush_tlb_group(smmu, as->id, iova);
smmu_flush(smmu); smmu_flush(smmu);
tegra_smmu_pte_put_use(as, iova);
return size; return size;
} }