Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/dma.c      | 12
-rw-r--r--  arch/arc/mm/fault.c    |  2
-rw-r--r--  arch/arc/mm/highmem.c  |  1
-rw-r--r--  arch/arc/mm/init.c     |  2
-rw-r--r--  arch/arc/mm/ioremap.c  |  2
5 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index ab74b5d91..20afc65e2 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -22,7 +22,7 @@
static void *arc_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
unsigned long order = get_order(size);
struct page *page;
@@ -46,7 +46,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
* (vs. always going to memory - thus are faster)
*/
if ((is_isa_arcv2() && ioc_exists) ||
- dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ (attrs & DMA_ATTR_NON_CONSISTENT))
need_coh = 0;
/*
@@ -90,13 +90,13 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
}
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
struct page *page = virt_to_page(paddr);
int is_non_coh = 1;
- is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
+ is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
(is_isa_arcv2() && ioc_exists);
if (PageHighMem(page) || !is_non_coh)
@@ -130,7 +130,7 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size,
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t paddr = page_to_phys(page) + offset;
_dma_cache_sync(paddr, size, dir);
@@ -138,7 +138,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
}
static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s;
int i;
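
The dma.c hunks above switch the DMA attributes argument from a struct dma_attrs pointer to a plain unsigned long bitmask, so attribute queries become simple bit tests instead of dma_get_attr() calls. A minimal caller-side sketch under the new convention (the helper name is hypothetical; dma_alloc_attrs() and DMA_ATTR_NON_CONSISTENT are the existing kernel API):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical driver helper: request a non-consistent buffer.
 * DMA_ATTR_NON_CONSISTENT is now a bit in an unsigned long, which
 * arc_dma_alloc() tests with (attrs & DMA_ATTR_NON_CONSISTENT). */
static void *alloc_noncoherent_buf(struct device *dev, size_t size,
				   dma_addr_t *handle)
{
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_NON_CONSISTENT);
}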
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index af63f4a13..e94e5aa33 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -137,7 +137,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
if (unlikely(fatal_signal_pending(current))) {
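
The fault.c hunk follows the core-MM change that dropped the separate mm argument from handle_mm_fault(): the mm is recoverable from the VMA itself. A one-line sketch of the relationship the new signature relies on (helper name hypothetical, not the actual core code):

#include <linux/mm.h>

/* Every VMA records its owning address space, so handle_mm_fault()
 * can derive the mm internally instead of taking it as a parameter. */
static struct mm_struct *owning_mm(struct vm_area_struct *vma)
{
	return vma->vm_mm;
}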
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 04f83322c..77ff64a87 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -61,6 +61,7 @@ void *kmap(struct page *page)
return kmap_high(page);
}
+EXPORT_SYMBOL(kmap);
void *kmap_atomic(struct page *page)
{
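
The added EXPORT_SYMBOL(kmap) makes kmap() callable from loadable modules on ARC. A minimal module-side sketch, assuming a struct page obtained elsewhere (the helper is hypothetical; kmap()/kunmap() are the existing highmem API):

#include <linux/highmem.h>

/* Hypothetical helper: temporarily map a (possibly highmem) page
 * and read its first byte. kmap() may sleep and must be paired
 * with kunmap(). */
static u8 peek_first_byte(struct page *page)
{
	u8 *vaddr = kmap(page);
	u8 val = *vaddr;

	kunmap(page);
	return val;
}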
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 8be930394..399e2f223 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -220,7 +220,7 @@ void __init mem_init(void)
/*
* free_initmem: Free all the __init memory.
*/
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
{
free_initmem_default(-1);
}
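
__init_refok was retired in favour of the clearer __ref annotation; both mark a reference from non-init code into __init text/data as intentional so modpost suppresses its section-mismatch warning. A sketch of the general pattern (function names hypothetical):

#include <linux/init.h>

static int __init build_boot_table(void)	/* discarded after boot */
{
	return 0;
}

/* __ref: the call into __init code is deliberate; this is only safe
 * because the caller itself runs before init memory is freed. */
static void __ref late_boot_setup(void)
{
	build_boot_table();
}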
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 49b8abd11..f52b7db67 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(ioremap);
/*
* ioremap with access flags
* Cache semantics wise it is same as ioremap - "forced" uncached.
- * However unline vanilla ioremap which bypasses ARC MMU for addresses in
+ * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
* ARC hardware uncached region, this one still goes thru the MMU as caller
* might need finer access control (R/W/X)
*/
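
The corrected comment documents ARC's ioremap_prot(), which routes the mapping through the MMU even for the hardware uncached region so callers get finer R/W/X control. A hedged usage sketch (base/size and the helper name are placeholders; ioremap_prot() forces the mapping uncached itself):

#include <linux/io.h>
#include <linux/mm.h>

/* Hypothetical: map a device register window with explicit kernel
 * permissions. Unlike plain ioremap() on ARC, the pgprot bits passed
 * here are enforced by real page table entries. */
static void __iomem *map_regs(phys_addr_t base, unsigned long size)
{
	return ioremap_prot(base, size, pgprot_val(PAGE_KERNEL));
}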