author:     André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
committer:  André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
commit:     8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch)
tree:       e9891aa6c295060d065adffd610c4f49ecf884f3 /include/linux/pmem.h
parent:     a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff)
Linux-libre 4.3.2-gnu
Diffstat (limited to 'include/linux/pmem.h')
-rw-r--r--  include/linux/pmem.h  115
1 file changed, 84 insertions, 31 deletions
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index d2114045a..85f810b33 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -14,28 +14,42 @@
 #define __PMEM_H__
 
 #include <linux/io.h>
+#include <linux/uio.h>
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
-#include <asm/cacheflush.h>
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+#include <asm/pmem.h>
 #else
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+/*
+ * These are simply here to enable compilation, all call sites gate
+ * calling these symbols with arch_has_pmem_api() and redirect to the
+ * implementation in asm/pmem.h.
+ */
+static inline bool __arch_has_wmb_pmem(void)
+{
+	return false;
+}
+
 static inline void arch_wmb_pmem(void)
 {
 	BUG();
 }
 
-static inline bool __arch_has_wmb_pmem(void)
+static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
+		size_t n)
 {
-	return false;
+	BUG();
 }
 
-static inline void __pmem *arch_memremap_pmem(resource_size_t offset,
-		unsigned long size)
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+		struct iov_iter *i)
 {
-	return NULL;
+	BUG();
+	return 0;
 }
 
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t n)
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
 {
 	BUG();
 }
@@ -43,18 +57,22 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
 
 /*
  * Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(),
- * arch_wmb_pmem(), and __arch_has_wmb_pmem().
+ * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
+ * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
  */
-
 static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
 {
 	memcpy(dst, (void __force const *) src, size);
 }
 
-static inline void memunmap_pmem(void __pmem *addr)
+static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
+{
+	devm_memunmap(dev, (void __force *) addr);
+}
+
+static inline bool arch_has_pmem_api(void)
 {
-	iounmap((void __force __iomem *) addr);
+	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
 }
 
 /**
@@ -68,14 +86,7 @@ static inline void memunmap_pmem(void __pmem *addr)
  */
 static inline bool arch_has_wmb_pmem(void)
 {
-	if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
-		return __arch_has_wmb_pmem();
-	return false;
-}
-
-static inline bool arch_has_pmem_api(void)
-{
-	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
+	return arch_has_pmem_api() && __arch_has_wmb_pmem();
 }
 
 /*
@@ -85,16 +96,24 @@ static inline bool arch_has_pmem_api(void)
  * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
  * making data durable relative to i/o completion.
  */
-static void default_memcpy_to_pmem(void __pmem *dst, const void *src,
+static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
 		size_t size)
 {
 	memcpy((void __force *) dst, src, size);
 }
 
-static void __pmem *default_memremap_pmem(resource_size_t offset,
-		unsigned long size)
+static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
+		size_t bytes, struct iov_iter *i)
+{
+	return copy_from_iter_nocache((void __force *)addr, bytes, i);
+}
+
+static inline void default_clear_pmem(void __pmem *addr, size_t size)
 {
-	return (void __pmem __force *)ioremap_wt(offset, size);
+	if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
+		clear_page((void __force *)addr);
+	else
+		memset((void __force *)addr, 0, size);
 }
 
 /**
@@ -109,12 +128,11 @@ static void __pmem *default_memremap_pmem(resource_size_t offset,
  * wmb_pmem() arrange for the data to be written through the
 * cache to persistent media.
  */
-static inline void __pmem *memremap_pmem(resource_size_t offset,
-		unsigned long size)
+static inline void __pmem *memremap_pmem(struct device *dev,
+		resource_size_t offset, unsigned long size)
 {
-	if (arch_has_pmem_api())
-		return arch_memremap_pmem(offset, size);
-	return default_memremap_pmem(offset, size);
+	return (void __pmem *) devm_memremap(dev, offset, size,
+			ARCH_MEMREMAP_PMEM);
 }
 
 /**
@@ -146,7 +164,42 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
  */
 static inline void wmb_pmem(void)
 {
-	if (arch_has_pmem_api())
+	if (arch_has_wmb_pmem())
 		arch_wmb_pmem();
+	else
+		wmb();
+}
+
+/**
+ * copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr:	PMEM destination address
+ * @bytes:	number of bytes to copy
+ * @i:		iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+		struct iov_iter *i)
+{
+	if (arch_has_pmem_api())
+		return arch_copy_from_iter_pmem(addr, bytes, i);
+	return default_copy_from_iter_pmem(addr, bytes, i);
+}
+
+/**
+ * clear_pmem - zero a PMEM memory range
+ * @addr:	virtual start address
+ * @size:	number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline void clear_pmem(void __pmem *addr, size_t size)
+{
+	if (arch_has_pmem_api())
+		arch_clear_pmem(addr, size);
+	else
+		default_clear_pmem(addr, size);
 }
 #endif /* __PMEM_H__ */
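For context, a minimal usage sketch of the reworked interface follows; it is not part of the commit. It assumes a caller that already holds a struct device and a struct resource describing the persistent-memory range; the function name pmem_example_write() and its parameters are hypothetical, while memremap_pmem(), clear_pmem(), memcpy_to_pmem(), wmb_pmem() and arch_has_wmb_pmem() are the helpers declared in the header above as of Linux 4.3.

/*
 * Illustrative sketch only -- not part of this commit.  The function
 * name and parameters are made up for the example; the pmem helpers
 * are the ones include/linux/pmem.h declares after this change.
 */
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/pmem.h>

static int pmem_example_write(struct device *dev, struct resource *res,
		const void *buf, size_t len)
{
	void __pmem *addr;

	/*
	 * Device-managed mapping via devm_memremap(): it is released
	 * automatically on driver detach, which is why memunmap_pmem()
	 * now also takes the struct device.
	 */
	addr = memremap_pmem(dev, res->start, resource_size(res));
	if (!addr)
		return -ENOMEM;

	clear_pmem(addr, PAGE_SIZE);	/* zero the first page */
	memcpy_to_pmem(addr, buf, len);	/* stage the payload */
	wmb_pmem();			/* drain write buffers to media */

	/*
	 * copy_from_iter_pmem() is the iov_iter-based variant of the
	 * copy above and, like it, must be followed by wmb_pmem().
	 * Callers that need a durability guarantee should also check
	 * arch_has_wmb_pmem() and fall back when it returns false.
	 */
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "wmb_pmem() may not ensure durability on this CPU\n");

	return 0;
}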