author		David S. Miller <davem@davemloft.net>	2019-02-13 22:00:17 -0800
committer	David S. Miller <davem@davemloft.net>	2019-02-13 22:00:17 -0800
commit		30580b33a21bfb37ef825b786c411b167baebd24 (patch)
tree		148c75447c7e21ea390e5cd357304d8c76c86fb0
parent		fb14b096355b8c947a256e8e5259a4ebaca00866 (diff)
parent		13f16d9d4ab7fccc918aafb146ea043be9574d49 (diff)
download	net-30580b33a21bfb37ef825b786c411b167baebd24.tar.gz
Merge branch 'pagepool-api-and-dma-address-storage'
Jesper Dangaard Brouer says:

====================
Fix page_pool API and dma address storage

As pointed out by David Miller in [1], the current page_pool
implementation stores dma_addr_t in page->private. This won't work on
32-bit platforms with 64-bit DMA addresses, since page->private is an
unsigned long and dma_addr_t is a u64.

Since no driver is yet using the DMA mapping capabilities of the API,
fix this by storing the information in 'struct page', and use that to
store and retrieve DMA addresses from network drivers.

As long as the addresses returned from dma_map_page() are aligned, the
first bit, which is used by the compound-page code, should not be set.

Ilias tested the first two patches on the Espressobin's mvneta driver,
for which we have patches for using the DMA API of page_pool.

[1]: https://lore.kernel.org/netdev/20181207.230655.1261252486319967024.davem@davemloft.net/
====================

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
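A minimal sketch (not part of this series) of why the old scheme breaks,
assuming a 32-bit kernel built with CONFIG_ARCH_DMA_ADDR_T_64BIT=y, so
dma_addr_t is a u64 while unsigned long is 32 bits, and given some
struct page *page:

	/* Illustration only: storing a 64-bit bus address in
	 * page->private (an unsigned long) drops the upper half.
	 */
	dma_addr_t dma = 0x100000000ULL;	/* needs bit 32 set */

	set_page_private(page, dma);		/* silently truncated to 0 */
	dma = page_private(page);		/* reads back the wrong address */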
-rw-r--r--	include/linux/mm_types.h	7
-rw-r--r--	net/core/page_pool.c		22
2 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 2c471a2c43fa71..0a36a22228e75a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -95,6 +95,13 @@ struct page {
*/
unsigned long private;
};
+ struct { /* page_pool used by netstack */
+ /**
+ * @dma_addr: might require a 64-bit value even on
+ * 32-bit architectures.
+ */
+ dma_addr_t dma_addr;
+ };
struct { /* slab, slob and slub */
union {
struct list_head slab_list; /* uses lru */
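The new page->dma_addr member sits in the same union as the fields
above, so it adds no size to struct page; it is only meaningful while
the page is owned by a page_pool that has DMA mapping enabled. A hedged
sketch of how a driver could read the stored address back (assumed
driver-side code; the tree has no accessor helper for this field at
this point):

	/* Driver-side sketch: returns the address written by
	 * __page_pool_alloc_pages_slow() in the hunk below.
	 */
	static dma_addr_t drv_page_dma(struct page *page)
	{
		return page->dma_addr;
	}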
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 43a932cb609b78..5b2252c6d49baf 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -136,17 +136,19 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
if (!(pool->p.flags & PP_FLAG_DMA_MAP))
goto skip_dma_map;
- /* Setup DMA mapping: use page->private for DMA-addr
+ /* Setup DMA mapping: use 'struct page' area for storing DMA-addr
+ * since dma_addr_t can be either 32 or 64 bits and does not always fit
+ * into page private data (i.e 32bit cpu with 64bit DMA caps)
* This mapping is kept for lifetime of page, until leaving pool.
*/
- dma = dma_map_page(pool->p.dev, page, 0,
- (PAGE_SIZE << pool->p.order),
- pool->p.dma_dir);
+ dma = dma_map_page_attrs(pool->p.dev, page, 0,
+ (PAGE_SIZE << pool->p.order),
+ pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(pool->p.dev, dma)) {
put_page(page);
return NULL;
}
- set_page_private(page, dma); /* page->private = dma; */
+ page->dma_addr = dma;
skip_dma_map:
/* When page just alloc'ed is should/must have refcnt 1. */
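Note the switch from dma_map_page() to dma_map_page_attrs() with
DMA_ATTR_SKIP_CPU_SYNC: map and unmap no longer perform cache
maintenance, so a driver that touches the buffer from the CPU is
expected to sync the region itself. An illustrative (assumed)
driver-side call:

	/* Sketch: make device-written data visible to the CPU
	 * before the driver parses the packet.
	 */
	dma_sync_single_for_cpu(pool->p.dev, page->dma_addr,
				PAGE_SIZE << pool->p.order,
				pool->p.dma_dir);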
@@ -175,13 +177,17 @@ EXPORT_SYMBOL(page_pool_alloc_pages);
static void __page_pool_clean_page(struct page_pool *pool,
struct page *page)
{
+ dma_addr_t dma;
+
if (!(pool->p.flags & PP_FLAG_DMA_MAP))
return;
+ dma = page->dma_addr;
/* DMA unmap */
- dma_unmap_page(pool->p.dev, page_private(page),
- PAGE_SIZE << pool->p.order, pool->p.dma_dir);
- set_page_private(page, 0);
+ dma_unmap_page_attrs(pool->p.dev, dma,
+ PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ page->dma_addr = 0;
}
/* Return a page to the page allocator, cleaning up our state */
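For context, the DMA path patched above only runs for pools created
with PP_FLAG_DMA_MAP set. A hedged setup sketch with illustrative
parameter values, assuming a struct device *dev for RX DMA (the struct
layout matches include/net/page_pool.h as of this merge):

	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_DMA_MAP,	/* exercise the code above */
		.order     = 0,			/* single pages */
		.pool_size = 256,
		.nid       = NUMA_NO_NODE,
		.dev       = dev,		/* assumed struct device * */
		.dma_dir   = DMA_FROM_DEVICE,	/* RX direction */
	};
	struct page_pool *pool = page_pool_create(&pp_params);

	if (IS_ERR(pool))
		return PTR_ERR(pool);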