author: Arnd Bergmann <arnd@arndb.de> 2023-03-27 14:13:08 +0200
committer: Emil Renner Berthing <emil.renner.berthing@canonical.com> 2023-07-20 20:53:34 +0200
commit: 578270809eb6b40c1295b58261dbeb3457b216ae
parent: 92251112c9fcf0b3767c9e5a0342b51207bd1875
Commit Summary:
    mips: dma-mapping: split out cache operation logic
Diffstat:
1 file changed, 30 insertions(+), 45 deletions(-)
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index b4350faf4f1e..b9d68bcc5d53 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -54,50 +54,13 @@ void *arch_dma_set_uncached(void *addr, size_t size)
return (void *)(__pa(addr) + UNCAC_BASE);
}

-static inline void dma_sync_virt_for_device(void *addr, size_t size,
- enum dma_data_direction dir)
-{
- switch (dir) {
- case DMA_TO_DEVICE:
- dma_cache_wback((unsigned long)addr, size);
- break;
- case DMA_FROM_DEVICE:
- dma_cache_inv((unsigned long)addr, size);
- break;
- case DMA_BIDIRECTIONAL:
- if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
- cpu_needs_post_dma_flush())
- dma_cache_wback((unsigned long)addr, size);
- else
- dma_cache_wback_inv((unsigned long)addr, size);
- break;
- default:
- BUG();
- }
-}
-
-static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
- enum dma_data_direction dir)
-{
- switch (dir) {
- case DMA_TO_DEVICE:
- break;
- case DMA_FROM_DEVICE:
- case DMA_BIDIRECTIONAL:
- dma_cache_inv((unsigned long)addr, size);
- break;
- default:
- BUG();
- }
-}
-
/*
* A single sg entry may refer to multiple physically contiguous pages. But
* we still need to process highmem pages individually. If highmem is not
* configured then the bulk of this loop gets optimized out.
*/
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir, bool for_device)
+ void(*cache_op)(unsigned long start, unsigned long size))
{
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
unsigned long offset = paddr & ~PAGE_MASK;
@@ -113,10 +76,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
}

addr = kmap_atomic(page);
- if (for_device)
- dma_sync_virt_for_device(addr + offset, len, dir);
- else
- dma_sync_virt_for_cpu(addr + offset, len, dir);
+ cache_op((unsigned long)addr + offset, len);
kunmap_atomic(addr);

offset = 0;
@@ -128,15 +88,40 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
- dma_sync_phys(paddr, size, dir, true);
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ dma_sync_phys(paddr, size, _dma_cache_wback);
+ break;
+ case DMA_FROM_DEVICE:
+ dma_sync_phys(paddr, size, _dma_cache_inv);
+ break;
+ case DMA_BIDIRECTIONAL:
+ if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
+ cpu_needs_post_dma_flush())
+ dma_sync_phys(paddr, size, _dma_cache_wback);
+ else
+ dma_sync_phys(paddr, size, _dma_cache_wback_inv);
+ break;
+ default:
+ break;
+ }
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
- if (cpu_needs_post_dma_flush())
- dma_sync_phys(paddr, size, dir, false);
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ break;
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ if (cpu_needs_post_dma_flush())
+ dma_sync_phys(paddr, size, _dma_cache_inv);
+ break;
+ default:
+ break;
+ }
}
#endif
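
The refactoring pattern above can be illustrated outside the kernel. Below is a minimal, standalone C sketch of the same idea: the DMA direction is decoded once in an arch_sync_dma_for_device()-style entry point, which then hands a cache-maintenance callback to the range walker instead of passing the direction down. Everything here (the *_stub functions, sync_range(), sync_for_device(), the plain-memory buffer) is invented for illustration and is not the MIPS implementation; only the callback signature void (*)(unsigned long start, unsigned long size) mirrors the patch.

#include <stdio.h>
#include <string.h>

/* Kernel-style direction values, reproduced so the sketch is self-contained. */
enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
};

/* Stub cache operations standing in for _dma_cache_wback() and friends. */
static void cache_wback_stub(unsigned long start, unsigned long size)
{
	printf("writeback      %#lx..%#lx\n", start, start + size);
}

static void cache_inv_stub(unsigned long start, unsigned long size)
{
	printf("invalidate     %#lx..%#lx\n", start, start + size);
}

static void cache_wback_inv_stub(unsigned long start, unsigned long size)
{
	printf("writeback+inv  %#lx..%#lx\n", start, start + size);
}

/*
 * Counterpart of the new dma_sync_phys(): it only walks the range and applies
 * whatever operation it was handed; it no longer needs to know the direction.
 * (The real function also splits the range per highmem page and kmaps each one.)
 */
static void sync_range(unsigned long start, unsigned long size,
		       void (*cache_op)(unsigned long start, unsigned long size))
{
	cache_op(start, size);
}

/* Direction decoded once, as in the new arch_sync_dma_for_device(). */
static void sync_for_device(unsigned long start, unsigned long size,
			    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		sync_range(start, size, cache_wback_stub);
		break;
	case DMA_FROM_DEVICE:
		sync_range(start, size, cache_inv_stub);
		break;
	case DMA_BIDIRECTIONAL:
		sync_range(start, size, cache_wback_inv_stub);
		break;
	default:
		break;
	}
}

int main(void)
{
	static char buffer[64];

	memset(buffer, 0, sizeof(buffer));
	sync_for_device((unsigned long)buffer, sizeof(buffer), DMA_TO_DEVICE);
	sync_for_device((unsigned long)buffer, sizeof(buffer), DMA_FROM_DEVICE);
	sync_for_device((unsigned long)buffer, sizeof(buffer), DMA_BIDIRECTIONAL);
	return 0;
}

With the direction handled up front, the walker keeps a single indirect call per page, and the DMA_BIDIRECTIONAL special case in the patch (write back only when a post-DMA invalidate will follow, gated by cpu_needs_post_dma_flush()) reduces to picking a different callback in the caller rather than rechecking the direction inside the loop.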