From 57f0f512b273f60d52568b8c6b77e17f5636edc0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Wed, 5 Aug 2015 17:04:01 -0300
Subject: Initial import

---
 drivers/staging/android/Kconfig                 |   63 +
 drivers/staging/android/Makefile                |   10 +
 drivers/staging/android/TODO                    |   17 +
 drivers/staging/android/ashmem.c                |  883 ++++++++++
 drivers/staging/android/ashmem.h                |   27 +
 drivers/staging/android/ion/Kconfig             |   35 +
 drivers/staging/android/ion/Makefile            |   10 +
 drivers/staging/android/ion/compat_ion.c        |  195 +++
 drivers/staging/android/ion/compat_ion.h        |   30 +
 drivers/staging/android/ion/ion.c               | 1652 +++++++++++++++++++++++
 drivers/staging/android/ion/ion.h               |  203 +++
 drivers/staging/android/ion/ion_carveout_heap.c |  193 +++
 drivers/staging/android/ion/ion_chunk_heap.c    |  194 +++
 drivers/staging/android/ion/ion_cma_heap.c      |  195 +++
 drivers/staging/android/ion/ion_dummy_driver.c  |  154 +++
 drivers/staging/android/ion/ion_heap.c          |  381 ++++++
 drivers/staging/android/ion/ion_page_pool.c     |  182 +++
 drivers/staging/android/ion/ion_priv.h          |  405 ++++++
 drivers/staging/android/ion/ion_system_heap.c   |  422 ++++++
 drivers/staging/android/ion/ion_test.c          |  283 ++++
 drivers/staging/android/ion/tegra/Makefile      |    1 +
 drivers/staging/android/ion/tegra/tegra_ion.c   |   80 ++
 drivers/staging/android/lowmemorykiller.c       |  207 +++
 drivers/staging/android/sw_sync.c               |  267 ++++
 drivers/staging/android/sw_sync.h               |   59 +
 drivers/staging/android/sync.c                  |  729 ++++++++
 drivers/staging/android/sync.h                  |  356 +++++
 drivers/staging/android/sync_debug.c            |  254 ++++
 drivers/staging/android/timed_gpio.c            |  168 +++
 drivers/staging/android/timed_gpio.h            |   33 +
 drivers/staging/android/timed_output.c          |  120 ++
 drivers/staging/android/timed_output.h          |   37 +
 drivers/staging/android/trace/sync.h            |   82 ++
 drivers/staging/android/uapi/ashmem.h           |   47 +
 drivers/staging/android/uapi/ion.h              |  196 +++
 drivers/staging/android/uapi/ion_test.h         |   70 +
 drivers/staging/android/uapi/sw_sync.h          |   32 +
 drivers/staging/android/uapi/sync.h             |   97 ++
 38 files changed, 8369 insertions(+)
 create mode 100644 drivers/staging/android/Kconfig
 create mode 100644 drivers/staging/android/Makefile
 create mode 100644 drivers/staging/android/TODO
 create mode 100644 drivers/staging/android/ashmem.c
 create mode 100644 drivers/staging/android/ashmem.h
 create mode 100644 drivers/staging/android/ion/Kconfig
 create mode 100644 drivers/staging/android/ion/Makefile
 create mode 100644 drivers/staging/android/ion/compat_ion.c
 create mode 100644 drivers/staging/android/ion/compat_ion.h
 create mode 100644 drivers/staging/android/ion/ion.c
 create mode 100644 drivers/staging/android/ion/ion.h
 create mode 100644 drivers/staging/android/ion/ion_carveout_heap.c
 create mode 100644 drivers/staging/android/ion/ion_chunk_heap.c
 create mode 100644 drivers/staging/android/ion/ion_cma_heap.c
 create mode 100644 drivers/staging/android/ion/ion_dummy_driver.c
 create mode 100644 drivers/staging/android/ion/ion_heap.c
 create mode 100644 drivers/staging/android/ion/ion_page_pool.c
 create mode 100644 drivers/staging/android/ion/ion_priv.h
 create mode 100644 drivers/staging/android/ion/ion_system_heap.c
 create mode 100644 drivers/staging/android/ion/ion_test.c
 create mode 100644 drivers/staging/android/ion/tegra/Makefile
 create mode 100644 drivers/staging/android/ion/tegra/tegra_ion.c
 create mode 100644 drivers/staging/android/lowmemorykiller.c
 create mode 100644 drivers/staging/android/sw_sync.c
 create mode 100644 drivers/staging/android/sw_sync.h
 create mode 100644 drivers/staging/android/sync.c
 create mode 100644 drivers/staging/android/sync.h
 create mode 100644 drivers/staging/android/sync_debug.c
 create mode 100644 drivers/staging/android/timed_gpio.c
 create mode 100644 drivers/staging/android/timed_gpio.h
 create mode 100644 drivers/staging/android/timed_output.c
 create mode 100644 drivers/staging/android/timed_output.h
 create mode 100644 drivers/staging/android/trace/sync.h
 create mode 100644 drivers/staging/android/uapi/ashmem.h
 create mode 100644 drivers/staging/android/uapi/ion.h
 create mode 100644 drivers/staging/android/uapi/ion_test.h
 create mode 100644 drivers/staging/android/uapi/sw_sync.h
 create mode 100644 drivers/staging/android/uapi/sync.h

diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
new file mode 100644
index 000000000..8feb9048e
--- /dev/null
+++ b/drivers/staging/android/Kconfig
@@ -0,0 +1,63 @@
+menu "Android"
+
+if ANDROID
+
+config ASHMEM
+	bool "Enable the Anonymous Shared Memory Subsystem"
+	default n
+	depends on SHMEM
+	---help---
+	  The ashmem subsystem is a new shared memory allocator, similar to
+	  POSIX SHM but with different behavior and sporting a simpler
+	  file-based API.
+
+	  It is, in theory, a good memory allocator for low-memory devices,
+	  because it can discard shared memory units when under memory
+	  pressure.
+
+config ANDROID_TIMED_OUTPUT
+	bool "Timed output class driver"
+	default y
+
+config ANDROID_TIMED_GPIO
+	tristate "Android timed gpio driver"
+	depends on GPIOLIB && ANDROID_TIMED_OUTPUT
+	default n
+
+config ANDROID_LOW_MEMORY_KILLER
+	bool "Android Low Memory Killer"
+	---help---
+	  Registers processes to be killed when memory is low.
+
+config SYNC
+	bool "Synchronization framework"
+	default n
+	select ANON_INODES
+	select DMA_SHARED_BUFFER
+	---help---
+	  This option enables the framework for synchronization between
+	  multiple drivers. Sync implementations can take advantage of
+	  hardware synchronization built into devices like GPUs.
+
+config SW_SYNC
+	bool "Software synchronization objects"
+	default n
+	depends on SYNC
+	---help---
+	  A sync object driver that uses a 32bit counter to coordinate
+	  synchronization. Useful when there is no hardware primitive
+	  backing the synchronization.
+
+config SW_SYNC_USER
+	bool "Userspace API for SW_SYNC"
+	default n
+	depends on SW_SYNC
+	---help---
+	  Provides a user space API to the sw sync object.
+	  *WARNING* improper use of this can result in deadlocking kernel
+	  drivers from userspace.
+
+source "drivers/staging/android/ion/Kconfig"
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
new file mode 100644
index 000000000..c7b6c99cc
--- /dev/null
+++ b/drivers/staging/android/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -I$(src)			# needed for trace events
+
+obj-y					+= ion/
+
+obj-$(CONFIG_ASHMEM)			+= ashmem.o
+obj-$(CONFIG_ANDROID_TIMED_OUTPUT)	+= timed_output.o
+obj-$(CONFIG_ANDROID_TIMED_GPIO)	+= timed_gpio.o
+obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o
+obj-$(CONFIG_SYNC)			+= sync.o sync_debug.o
+obj-$(CONFIG_SW_SYNC)			+= sw_sync.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
new file mode 100644
index 000000000..06954cdf3
--- /dev/null
+++ b/drivers/staging/android/TODO
@@ -0,0 +1,17 @@
+TODO:
+	- checkpatch.pl cleanups
+	- sparse fixes
+	- rename files to be not so "generic"
+	- make sure things build as modules properly
+	- add proper arch dependencies as needed
+	- audit userspace interfaces to make sure they are sane
+	- kuid_t should never be exposed to user space, as it is a
+	  kernel-internal type. The data structure for kuid_t is:
+		typedef struct {
+			uid_t val;
+		} kuid_t;
+	  This bug was introduced by Xiong Zhou in patch bd471258f2e09
+	  ("staging: android: logger: use kuid_t instead of uid_t")
+
+Please send patches to Greg Kroah-Hartman and Cc:
+Brian Swetland
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
new file mode 100644
index 000000000..82effc8b4
--- /dev/null
+++ b/drivers/staging/android/ashmem.c
@@ -0,0 +1,883 @@
+/* mm/ashmem.c
+ *
+ * Anonymous Shared Memory Subsystem, ashmem
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "ashmem: " fmt
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/falloc.h>
+#include <linux/miscdevice.h>
+#include <linux/security.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/personality.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include "ashmem.h"
+
+#define ASHMEM_NAME_PREFIX "dev/ashmem/"
+#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
+#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
+
+/**
+ * struct ashmem_area - The anonymous shared memory area
+ * @name:		The optional name in /proc/pid/maps
+ * @unpinned_list:	The list of all ashmem areas
+ * @file:		The shmem-based backing file
+ * @size:		The size of the mapping, in bytes
+ * @prot_mask:		The allowed protection bits, as vm_flags
+ *
+ * The lifecycle of this structure is from our parent file's open() until
+ * its release(). It is also protected by 'ashmem_mutex'
+ *
+ * Warning: Mappings do NOT pin this structure; It dies on close()
+ */
+struct ashmem_area {
+	char name[ASHMEM_FULL_NAME_LEN];
+	struct list_head unpinned_list;
+	struct file *file;
+	size_t size;
+	unsigned long prot_mask;
+};
+
+/**
+ * struct ashmem_range - A range of unpinned/evictable pages
+ * @lru:	The entry in the LRU list
+ * @unpinned:	The entry in its area's unpinned list
+ * @asma:	The associated anonymous shared memory area.
+ * @pgstart:	The starting page (inclusive)
+ * @pgend:	The ending page (inclusive)
+ * @purged:	The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ *
+ * The lifecycle of this structure is from unpin to pin.
+ * It is protected by 'ashmem_mutex'
+ */
+struct ashmem_range {
+	struct list_head lru;
+	struct list_head unpinned;
+	struct ashmem_area *asma;
+	size_t pgstart;
+	size_t pgend;
+	unsigned int purged;
+};
+
+/* LRU list of unpinned pages, protected by ashmem_mutex */
+static LIST_HEAD(ashmem_lru_list);
+
+/**
+ * lru_count - The count of pages on our LRU list.
+ *
+ * This is protected by ashmem_mutex.
+ */
+static unsigned long lru_count;
+
+/**
+ * ashmem_mutex - protects the list of and each individual ashmem_area
+ *
+ * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
+ */
+static DEFINE_MUTEX(ashmem_mutex);
+
+static struct kmem_cache *ashmem_area_cachep __read_mostly;
+static struct kmem_cache *ashmem_range_cachep __read_mostly;
+
+#define range_size(range) \
+	((range)->pgend - (range)->pgstart + 1)
+
+#define range_on_lru(range) \
+	((range)->purged == ASHMEM_NOT_PURGED)
+
+#define page_range_subsumes_range(range, start, end) \
+	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
+
+#define page_range_subsumed_by_range(range, start, end) \
+	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
+
+#define page_in_range(range, page) \
+	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
+
+#define page_range_in_range(range, start, end) \
+	(page_in_range(range, start) || page_in_range(range, end) || \
+		page_range_subsumes_range(range, start, end))
+
+#define range_before_page(range, page) \
+	((range)->pgend < (page))
+
+#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)
+
+/**
+ * lru_add() - Adds a range of memory to the LRU list
+ * @range:	The memory range being added.
+ *
+ * The range is first added to the end (tail) of the LRU list.
+ * After this, the size of the range is added to @lru_count
+ */
+static inline void lru_add(struct ashmem_range *range)
+{
+	list_add_tail(&range->lru, &ashmem_lru_list);
+	lru_count += range_size(range);
+}
+
+/**
+ * lru_del() - Removes a range of memory from the LRU list
+ * @range:	The memory range being removed
+ *
+ * The range is first deleted from the LRU list.
+ * After this, the size of the range is removed from @lru_count
+ */
+static inline void lru_del(struct ashmem_range *range)
+{
+	list_del(&range->lru);
+	lru_count -= range_size(range);
+}
+
+/**
+ * range_alloc() - Allocates and initializes a new ashmem_range structure
+ * @asma:	The associated ashmem_area
+ * @prev_range:	The previous ashmem_range in the sorted asma->unpinned list
+ * @purged:	Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * @start:	The starting page (inclusive)
+ * @end:	The ending page (inclusive)
+ *
+ * This function is protected by ashmem_mutex.
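+ * The new range is linked into the unpinned list immediately before
+ * @prev_range, which keeps that list sorted.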
+ *
+ * Return: 0 if successful, or -ENOMEM if there is an error
+ */
+static int range_alloc(struct ashmem_area *asma,
+		       struct ashmem_range *prev_range, unsigned int purged,
+		       size_t start, size_t end)
+{
+	struct ashmem_range *range;
+
+	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+	if (unlikely(!range))
+		return -ENOMEM;
+
+	range->asma = asma;
+	range->pgstart = start;
+	range->pgend = end;
+	range->purged = purged;
+
+	list_add_tail(&range->unpinned, &prev_range->unpinned);
+
+	if (range_on_lru(range))
+		lru_add(range);
+
+	return 0;
+}
+
+/**
+ * range_del() - Deletes and deallocates an ashmem_range structure
+ * @range:	The associated ashmem_range that has previously been allocated
+ */
+static void range_del(struct ashmem_range *range)
+{
+	list_del(&range->unpinned);
+	if (range_on_lru(range))
+		lru_del(range);
+	kmem_cache_free(ashmem_range_cachep, range);
+}
+
+/**
+ * range_shrink() - Shrinks an ashmem_range
+ * @range:	The associated ashmem_range being shrunk
+ * @start:	The starting byte of the new range
+ * @end:	The ending byte of the new range
+ *
+ * This does not modify the data inside the existing range in any way - It
+ * simply shrinks the boundaries of the range.
+ *
+ * Theoretically, with a little tweaking, this could eventually be changed
+ * to range_resize, and expand the lru_count if the new range is larger.
+ */
+static inline void range_shrink(struct ashmem_range *range,
+				size_t start, size_t end)
+{
+	size_t pre = range_size(range);
+
+	range->pgstart = start;
+	range->pgend = end;
+
+	if (range_on_lru(range))
+		lru_count -= pre - range_size(range);
+}
+
+/**
+ * ashmem_open() - Opens an Anonymous Shared Memory structure
+ * @inode:	The backing file's index node(?)
+ * @file:	The backing file
+ *
+ * Please note that the ashmem_area is not returned by this function - It is
+ * instead written to "file->private_data".
+ *
+ * Return: 0 if successful, or another code if unsuccessful.
+ */
+static int ashmem_open(struct inode *inode, struct file *file)
+{
+	struct ashmem_area *asma;
+	int ret;
+
+	ret = generic_file_open(inode, file);
+	if (unlikely(ret))
+		return ret;
+
+	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
+	if (unlikely(!asma))
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&asma->unpinned_list);
+	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
+	asma->prot_mask = PROT_MASK;
+	file->private_data = asma;
+
+	return 0;
+}
+
+/**
+ * ashmem_release() - Releases an Anonymous Shared Memory structure
+ * @ignored:	The backing file's Index Node(?) - It is ignored here.
+ * @file:	The backing file
+ *
+ * Return: 0 if successful. If it is anything else, go have a coffee and
+ * try again.
+ */
+static int ashmem_release(struct inode *ignored, struct file *file)
+{
+	struct ashmem_area *asma = file->private_data;
+	struct ashmem_range *range, *next;
+
+	mutex_lock(&ashmem_mutex);
+	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
+		range_del(range);
+	mutex_unlock(&ashmem_mutex);
+
+	if (asma->file)
+		fput(asma->file);
+	kmem_cache_free(ashmem_area_cachep, asma);
+
+	return 0;
+}
+
+/**
+ * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
+ * @file:	The associated backing file.
+ * @buf:	The buffer of data being written to
+ * @len:	The number of bytes being read
+ * @pos:	The position of the first byte to read.
+ *
+ * Return: 0 if successful, or another return code if not.
+ */ +static ssize_t ashmem_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct ashmem_area *asma = file->private_data; + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* If size is not set, or set to 0, always return EOF. */ + if (asma->size == 0) + goto out_unlock; + + if (!asma->file) { + ret = -EBADF; + goto out_unlock; + } + + mutex_unlock(&ashmem_mutex); + + /* + * asma and asma->file are used outside the lock here. We assume + * once asma->file is set it will never be changed, and will not + * be destroyed until all references to the file are dropped and + * ashmem_release is called. + */ + ret = __vfs_read(asma->file, buf, len, pos); + if (ret >= 0) { + /** Update backing file pos, since f_ops->read() doesn't */ + asma->file->f_pos = *pos; + } + return ret; + +out_unlock: + mutex_unlock(&ashmem_mutex); + return ret; +} + +static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) +{ + struct ashmem_area *asma = file->private_data; + int ret; + + mutex_lock(&ashmem_mutex); + + if (asma->size == 0) { + ret = -EINVAL; + goto out; + } + + if (!asma->file) { + ret = -EBADF; + goto out; + } + + ret = vfs_llseek(asma->file, offset, origin); + if (ret < 0) + goto out; + + /** Copy f_pos from backing file, since f_ops->llseek() sets it */ + file->f_pos = asma->file->f_pos; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +static inline vm_flags_t calc_vm_may_flags(unsigned long prot) +{ + return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) | + _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) | + _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); +} + +static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct ashmem_area *asma = file->private_data; + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* user needs to SET_SIZE before mapping */ + if (unlikely(!asma->size)) { + ret = -EINVAL; + goto out; + } + + /* requested protection bits must match our allowed protection mask */ + if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & + calc_vm_prot_bits(PROT_MASK))) { + ret = -EPERM; + goto out; + } + vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask); + + if (!asma->file) { + char *name = ASHMEM_NAME_DEF; + struct file *vmfile; + + if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') + name = asma->name; + + /* ... and allocate the backing shmem file */ + vmfile = shmem_file_setup(name, asma->size, vma->vm_flags, 0); + if (unlikely(IS_ERR(vmfile))) { + ret = PTR_ERR(vmfile); + goto out; + } + asma->file = vmfile; + } + get_file(asma->file); + + /* + * XXX - Reworked to use shmem_zero_setup() instead of + * shmem_set_file while we're in staging. -jstultz + */ + if (vma->vm_flags & VM_SHARED) { + ret = shmem_zero_setup(vma); + if (ret) { + fput(asma->file); + goto out; + } + } + + if (vma->vm_file) + fput(vma->vm_file); + vma->vm_file = asma->file; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +/* + * ashmem_shrink - our cache shrinker, called from mm/vmscan.c + * + * 'nr_to_scan' is the number of objects to scan for freeing. + * + * 'gfp_mask' is the mask of the allocation that got us into this mess. + * + * Return value is the number of objects freed or -1 if we cannot + * proceed without risk of deadlock (due to gfp_mask). + * + * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial + * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' + * pages freed. 
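+ *
+ * Note that purging punches a hole in the backing shmem file
+ * (FALLOC_FL_PUNCH_HOLE), so a purged range's contents are lost
+ * immediately; a later pin of that range reports ASHMEM_WAS_PURGED.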
+ */
+static unsigned long
+ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct ashmem_range *range, *next;
+	unsigned long freed = 0;
+
+	/* We might recurse into filesystem code, so bail out if necessary */
+	if (!(sc->gfp_mask & __GFP_FS))
+		return SHRINK_STOP;
+
+	mutex_lock(&ashmem_mutex);
+	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+		loff_t start = range->pgstart * PAGE_SIZE;
+		loff_t end = (range->pgend + 1) * PAGE_SIZE;
+
+		vfs_fallocate(range->asma->file,
+			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+			      start, end - start);
+		range->purged = ASHMEM_WAS_PURGED;
+		lru_del(range);
+
+		freed += range_size(range);
+		if (--sc->nr_to_scan <= 0)
+			break;
+	}
+	mutex_unlock(&ashmem_mutex);
+	return freed;
+}
+
+static unsigned long
+ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	/*
+	 * Note that lru_count is a count of pages on the LRU, not a count
+	 * of objects on the list. This means the scan function needs to
+	 * return the number of pages freed, not the number of objects
+	 * scanned.
+	 */
+	return lru_count;
+}
+
+static struct shrinker ashmem_shrinker = {
+	.count_objects = ashmem_shrink_count,
+	.scan_objects = ashmem_shrink_scan,
+	/*
+	 * XXX (dchinner): I wish people would comment on why they need
+	 * such significant changes to the default value here
+	 */
+	.seeks = DEFAULT_SEEKS * 4,
+};
+
+static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
+{
+	int ret = 0;
+
+	mutex_lock(&ashmem_mutex);
+
+	/* the user can only remove, not add, protection bits */
+	if (unlikely((asma->prot_mask & prot) != prot)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* does the application expect PROT_READ to imply PROT_EXEC? */
+	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+		prot |= PROT_EXEC;
+
+	asma->prot_mask = prot;
+
+out:
+	mutex_unlock(&ashmem_mutex);
+	return ret;
+}
+
+static int set_name(struct ashmem_area *asma, void __user *name)
+{
+	int len;
+	int ret = 0;
+	char local_name[ASHMEM_NAME_LEN];
+
+	/*
+	 * Holding the ashmem_mutex while doing a copy_from_user might cause
+	 * a data abort which would try to access mmap_sem. If another
+	 * thread has invoked ashmem_mmap then it will be holding the
+	 * semaphore and will be waiting for ashmem_mutex, thereby leading
+	 * to deadlock. We'll release the mutex and take the name to a local
+	 * variable that does not need protection and later copy the local
+	 * variable to the structure member with lock held.
+	 */
+	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+	if (len < 0)
+		return len;
+	if (len == ASHMEM_NAME_LEN)
+		local_name[ASHMEM_NAME_LEN - 1] = '\0';
+	mutex_lock(&ashmem_mutex);
+	/* cannot change an existing mapping's name */
+	if (unlikely(asma->file))
+		ret = -EINVAL;
+	else
+		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+
+	mutex_unlock(&ashmem_mutex);
+	return ret;
+}
+
+static int get_name(struct ashmem_area *asma, void __user *name)
+{
+	int ret = 0;
+	size_t len;
+	/*
+	 * Have a local variable to which we'll copy the content
+	 * from asma with the lock held. Later we can copy this to the user
+	 * space safely without holding any locks. So even if we proceed to
+	 * wait for mmap_sem, it won't lead to deadlock.
+	 */
+	char local_name[ASHMEM_NAME_LEN];
+
+	mutex_lock(&ashmem_mutex);
+	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
+		/*
+		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
+		 * prevents us from revealing one user's stack to another.
+ */ + len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; + memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len); + } else { + len = sizeof(ASHMEM_NAME_DEF); + memcpy(local_name, ASHMEM_NAME_DEF, len); + } + mutex_unlock(&ashmem_mutex); + + /* + * Now we are just copying from the stack variable to userland + * No lock held + */ + if (unlikely(copy_to_user(name, local_name, len))) + ret = -EFAULT; + return ret; +} + +/* + * ashmem_pin - pin the given ashmem region, returning whether it was + * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED). + * + * Caller must hold ashmem_mutex. + */ +static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +{ + struct ashmem_range *range, *next; + int ret = ASHMEM_NOT_PURGED; + + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { + /* moved past last applicable page; we can short circuit */ + if (range_before_page(range, pgstart)) + break; + + /* + * The user can ask us to pin pages that span multiple ranges, + * or to pin pages that aren't even unpinned, so this is messy. + * + * Four cases: + * 1. The requested range subsumes an existing range, so we + * just remove the entire matching range. + * 2. The requested range overlaps the start of an existing + * range, so we just update that range. + * 3. The requested range overlaps the end of an existing + * range, so we just update that range. + * 4. The requested range punches a hole in an existing range, + * so we have to update one side of the range and then + * create a new range for the other side. + */ + if (page_range_in_range(range, pgstart, pgend)) { + ret |= range->purged; + + /* Case #1: Easy. Just nuke the whole thing. */ + if (page_range_subsumes_range(range, pgstart, pgend)) { + range_del(range); + continue; + } + + /* Case #2: We overlap from the start, so adjust it */ + if (range->pgstart >= pgstart) { + range_shrink(range, pgend + 1, range->pgend); + continue; + } + + /* Case #3: We overlap from the rear, so adjust it */ + if (range->pgend <= pgend) { + range_shrink(range, range->pgstart, pgstart-1); + continue; + } + + /* + * Case #4: We eat a chunk out of the middle. A bit + * more complicated, we allocate a new range for the + * second half and adjust the first chunk's endpoint. + */ + range_alloc(asma, range, range->purged, + pgend + 1, range->pgend); + range_shrink(range, range->pgstart, pgstart - 1); + break; + } + } + + return ret; +} + +/* + * ashmem_unpin - unpin the given range of pages. Returns zero on success. + * + * Caller must hold ashmem_mutex. + */ +static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +{ + struct ashmem_range *range, *next; + unsigned int purged = ASHMEM_NOT_PURGED; + +restart: + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { + /* short circuit: this is our insertion point */ + if (range_before_page(range, pgstart)) + break; + + /* + * The user can ask us to unpin pages that are already entirely + * or partially pinned. We handle those two cases here. 
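+ * A request wholly contained in an existing unpinned range is a no-op;
+ * any overlapping ranges are deleted and coalesced into the single new
+ * range allocated below.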
+ */ + if (page_range_subsumed_by_range(range, pgstart, pgend)) + return 0; + if (page_range_in_range(range, pgstart, pgend)) { + pgstart = min_t(size_t, range->pgstart, pgstart), + pgend = max_t(size_t, range->pgend, pgend); + purged |= range->purged; + range_del(range); + goto restart; + } + } + + return range_alloc(asma, range, purged, pgstart, pgend); +} + +/* + * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the + * given interval are unpinned and ASHMEM_IS_PINNED otherwise. + * + * Caller must hold ashmem_mutex. + */ +static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart, + size_t pgend) +{ + struct ashmem_range *range; + int ret = ASHMEM_IS_PINNED; + + list_for_each_entry(range, &asma->unpinned_list, unpinned) { + if (range_before_page(range, pgstart)) + break; + if (page_range_in_range(range, pgstart, pgend)) { + ret = ASHMEM_IS_UNPINNED; + break; + } + } + + return ret; +} + +static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, + void __user *p) +{ + struct ashmem_pin pin; + size_t pgstart, pgend; + int ret = -EINVAL; + + if (unlikely(!asma->file)) + return -EINVAL; + + if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) + return -EFAULT; + + /* per custom, you can pass zero for len to mean "everything onward" */ + if (!pin.len) + pin.len = PAGE_ALIGN(asma->size) - pin.offset; + + if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) + return -EINVAL; + + if (unlikely(((__u32) -1) - pin.offset < pin.len)) + return -EINVAL; + + if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) + return -EINVAL; + + pgstart = pin.offset / PAGE_SIZE; + pgend = pgstart + (pin.len / PAGE_SIZE) - 1; + + mutex_lock(&ashmem_mutex); + + switch (cmd) { + case ASHMEM_PIN: + ret = ashmem_pin(asma, pgstart, pgend); + break; + case ASHMEM_UNPIN: + ret = ashmem_unpin(asma, pgstart, pgend); + break; + case ASHMEM_GET_PIN_STATUS: + ret = ashmem_get_pin_status(asma, pgstart, pgend); + break; + } + + mutex_unlock(&ashmem_mutex); + + return ret; +} + +static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct ashmem_area *asma = file->private_data; + long ret = -ENOTTY; + + switch (cmd) { + case ASHMEM_SET_NAME: + ret = set_name(asma, (void __user *)arg); + break; + case ASHMEM_GET_NAME: + ret = get_name(asma, (void __user *)arg); + break; + case ASHMEM_SET_SIZE: + ret = -EINVAL; + if (!asma->file) { + ret = 0; + asma->size = (size_t) arg; + } + break; + case ASHMEM_GET_SIZE: + ret = asma->size; + break; + case ASHMEM_SET_PROT_MASK: + ret = set_prot_mask(asma, arg); + break; + case ASHMEM_GET_PROT_MASK: + ret = asma->prot_mask; + break; + case ASHMEM_PIN: + case ASHMEM_UNPIN: + case ASHMEM_GET_PIN_STATUS: + ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg); + break; + case ASHMEM_PURGE_ALL_CACHES: + ret = -EPERM; + if (capable(CAP_SYS_ADMIN)) { + struct shrink_control sc = { + .gfp_mask = GFP_KERNEL, + .nr_to_scan = LONG_MAX, + }; + ret = ashmem_shrink_count(&ashmem_shrinker, &sc); + ashmem_shrink_scan(&ashmem_shrinker, &sc); + } + break; + } + + return ret; +} + +/* support of 32bit userspace on 64bit platforms */ +#ifdef CONFIG_COMPAT +static long compat_ashmem_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case COMPAT_ASHMEM_SET_SIZE: + cmd = ASHMEM_SET_SIZE; + break; + case COMPAT_ASHMEM_SET_PROT_MASK: + cmd = ASHMEM_SET_PROT_MASK; + break; + } + return ashmem_ioctl(file, cmd, arg); +} +#endif + +static const struct file_operations ashmem_fops = { + .owner = 
THIS_MODULE,
+	.open = ashmem_open,
+	.release = ashmem_release,
+	.read = ashmem_read,
+	.llseek = ashmem_llseek,
+	.mmap = ashmem_mmap,
+	.unlocked_ioctl = ashmem_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_ashmem_ioctl,
+#endif
+};
+
+static struct miscdevice ashmem_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "ashmem",
+	.fops = &ashmem_fops,
+};
+
+static int __init ashmem_init(void)
+{
+	int ret;
+
+	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
+					       sizeof(struct ashmem_area),
+					       0, 0, NULL);
+	if (unlikely(!ashmem_area_cachep)) {
+		pr_err("failed to create slab cache\n");
+		return -ENOMEM;
+	}
+
+	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
+						sizeof(struct ashmem_range),
+						0, 0, NULL);
+	if (unlikely(!ashmem_range_cachep)) {
+		pr_err("failed to create slab cache\n");
+		return -ENOMEM;
+	}
+
+	ret = misc_register(&ashmem_misc);
+	if (unlikely(ret)) {
+		pr_err("failed to register misc device!\n");
+		return ret;
+	}
+
+	register_shrinker(&ashmem_shrinker);
+
+	pr_info("initialized\n");
+
+	return 0;
+}
+
+static void __exit ashmem_exit(void)
+{
+	int ret;
+
+	unregister_shrinker(&ashmem_shrinker);
+
+	ret = misc_deregister(&ashmem_misc);
+	if (unlikely(ret))
+		pr_err("failed to unregister misc device!\n");
+
+	kmem_cache_destroy(ashmem_range_cachep);
+	kmem_cache_destroy(ashmem_area_cachep);
+
+	pr_info("unloaded\n");
+}
+
+module_init(ashmem_init);
+module_exit(ashmem_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
new file mode 100644
index 000000000..5abcfd7aa
--- /dev/null
+++ b/drivers/staging/android/ashmem.h
@@ -0,0 +1,27 @@
+/*
+ * include/linux/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _LINUX_ASHMEM_H
+#define _LINUX_ASHMEM_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+#include <linux/compat.h>
+
+#include "uapi/ashmem.h"
+
+/* support of 32bit userspace on 64bit platforms */
+#ifdef CONFIG_COMPAT
+#define COMPAT_ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, compat_size_t)
+#define COMPAT_ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned int)
+#endif
+
+#endif	/* _LINUX_ASHMEM_H */
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
new file mode 100644
index 000000000..345234624
--- /dev/null
+++ b/drivers/staging/android/ion/Kconfig
@@ -0,0 +1,35 @@
+menuconfig ION
+	bool "Ion Memory Manager"
+	depends on HAVE_MEMBLOCK && HAS_DMA && MMU
+	select GENERIC_ALLOCATOR
+	select DMA_SHARED_BUFFER
+	---help---
+	  Choose this option to enable the ION Memory Manager,
+	  used by Android to efficiently allocate buffers
+	  from userspace that can be shared between drivers.
+	  If you're not using Android, it's probably safe to
+	  say N here.
+
+config ION_TEST
+	tristate "Ion Test Device"
+	depends on ION
+	help
+	  Choose this option to create a device that can be used to test the
+	  kernel and device side ION functions.
+
+config ION_DUMMY
+	bool "Dummy Ion driver"
+	depends on ION
+	help
+	  Provides a dummy ION driver that registers the
+	  /dev/ion device and some basic heaps. This can
+	  be used for testing the ION infrastructure if
+	  one doesn't have access to hardware drivers that
+	  use ION.
+
+config ION_TEGRA
+	tristate "Ion for Tegra"
+	depends on ARCH_TEGRA && ION
+	help
+	  Choose this option if you wish to use ion on an nVidia Tegra.
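The ashmem driver added above is driven entirely through ioctls on the
/dev/ashmem node it registers. A minimal userspace sketch of that flow
(illustrative only: it assumes the uapi/ashmem.h definitions are visible
as <linux/ashmem.h>, and error handling is elided):

	/* Illustrative sketch, not part of the patch. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/ashmem.h>

	int main(void)
	{
		size_t size = 4096;
		struct ashmem_pin pin = { .offset = 0, .len = 4096 };
		int fd = open("/dev/ashmem", O_RDWR);
		char *p;

		/* name and size may only be set before the first mmap() */
		ioctl(fd, ASHMEM_SET_NAME, "example");
		ioctl(fd, ASHMEM_SET_SIZE, size);

		p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		strcpy(p, "hello");

		/* unpinned pages become reclaimable under memory pressure... */
		ioctl(fd, ASHMEM_UNPIN, &pin);
		/* ...and pinning again reports whether they were purged */
		if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
			printf("contents were discarded\n");

		munmap(p, size);
		close(fd);
		return 0;
	}

A region must be named and sized before its first mmap(); after that, the
shrinker may purge any unpinned pages, which is why re-pinning reports
ASHMEM_WAS_PURGED when contents were lost.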
+ diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile new file mode 100644 index 000000000..b56fd2bf2 --- /dev/null +++ b/drivers/staging/android/ion/Makefile @@ -0,0 +1,10 @@ +obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \ + ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o +obj-$(CONFIG_ION_TEST) += ion_test.o +ifdef CONFIG_COMPAT +obj-$(CONFIG_ION) += compat_ion.o +endif + +obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o +obj-$(CONFIG_ION_TEGRA) += tegra/ + diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c new file mode 100644 index 000000000..a402fdaf5 --- /dev/null +++ b/drivers/staging/android/ion/compat_ion.c @@ -0,0 +1,195 @@ +/* + * drivers/staging/android/ion/compat_ion.c + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +#include "ion.h" +#include "compat_ion.h" + +/* See drivers/staging/android/uapi/ion.h for the definition of these structs */ +struct compat_ion_allocation_data { + compat_size_t len; + compat_size_t align; + compat_uint_t heap_id_mask; + compat_uint_t flags; + compat_int_t handle; +}; + +struct compat_ion_custom_data { + compat_uint_t cmd; + compat_ulong_t arg; +}; + +struct compat_ion_handle_data { + compat_int_t handle; +}; + +#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ + struct compat_ion_allocation_data) +#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \ + struct compat_ion_handle_data) +#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \ + struct compat_ion_custom_data) + +static int compat_get_ion_allocation_data( + struct compat_ion_allocation_data __user *data32, + struct ion_allocation_data __user *data) +{ + compat_size_t s; + compat_uint_t u; + compat_int_t i; + int err; + + err = get_user(s, &data32->len); + err |= put_user(s, &data->len); + err |= get_user(s, &data32->align); + err |= put_user(s, &data->align); + err |= get_user(u, &data32->heap_id_mask); + err |= put_user(u, &data->heap_id_mask); + err |= get_user(u, &data32->flags); + err |= put_user(u, &data->flags); + err |= get_user(i, &data32->handle); + err |= put_user(i, &data->handle); + + return err; +} + +static int compat_get_ion_handle_data( + struct compat_ion_handle_data __user *data32, + struct ion_handle_data __user *data) +{ + compat_int_t i; + int err; + + err = get_user(i, &data32->handle); + err |= put_user(i, &data->handle); + + return err; +} + +static int compat_put_ion_allocation_data( + struct compat_ion_allocation_data __user *data32, + struct ion_allocation_data __user *data) +{ + compat_size_t s; + compat_uint_t u; + compat_int_t i; + int err; + + err = get_user(s, &data->len); + err |= put_user(s, &data32->len); + err |= get_user(s, &data->align); + err |= put_user(s, &data32->align); + err |= get_user(u, &data->heap_id_mask); + err |= put_user(u, &data32->heap_id_mask); + err |= get_user(u, &data->flags); + err |= put_user(u, &data32->flags); + err |= get_user(i, &data->handle); + err |= put_user(i, &data32->handle); + + return err; +} + +static int 
compat_get_ion_custom_data( + struct compat_ion_custom_data __user *data32, + struct ion_custom_data __user *data) +{ + compat_uint_t cmd; + compat_ulong_t arg; + int err; + + err = get_user(cmd, &data32->cmd); + err |= put_user(cmd, &data->cmd); + err |= get_user(arg, &data32->arg); + err |= put_user(arg, &data->arg); + + return err; +}; + +long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + long ret; + + if (!filp->f_op->unlocked_ioctl) + return -ENOTTY; + + switch (cmd) { + case COMPAT_ION_IOC_ALLOC: + { + struct compat_ion_allocation_data __user *data32; + struct ion_allocation_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_allocation_data(data32, data); + if (err) + return err; + ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC, + (unsigned long)data); + err = compat_put_ion_allocation_data(data32, data); + return ret ? ret : err; + } + case COMPAT_ION_IOC_FREE: + { + struct compat_ion_handle_data __user *data32; + struct ion_handle_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_handle_data(data32, data); + if (err) + return err; + + return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE, + (unsigned long)data); + } + case COMPAT_ION_IOC_CUSTOM: { + struct compat_ion_custom_data __user *data32; + struct ion_custom_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_custom_data(data32, data); + if (err) + return err; + + return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM, + (unsigned long)data); + } + case ION_IOC_SHARE: + case ION_IOC_MAP: + case ION_IOC_IMPORT: + case ION_IOC_SYNC: + return filp->f_op->unlocked_ioctl(filp, cmd, + (unsigned long)compat_ptr(arg)); + default: + return -ENOIOCTLCMD; + } +} diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h new file mode 100644 index 000000000..c2ad5893d --- /dev/null +++ b/drivers/staging/android/ion/compat_ion.h @@ -0,0 +1,30 @@ +/* + + * drivers/staging/android/ion/compat_ion.h + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_COMPAT_ION_H +#define _LINUX_COMPAT_ION_H + +#if IS_ENABLED(CONFIG_COMPAT) + +long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); + +#else + +#define compat_ion_ioctl NULL + +#endif /* CONFIG_COMPAT */ +#endif /* _LINUX_COMPAT_ION_H */ diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c new file mode 100644 index 000000000..b0b96ab31 --- /dev/null +++ b/drivers/staging/android/ion/ion.c @@ -0,0 +1,1652 @@ +/* + + * drivers/staging/android/ion/ion.c + * + * Copyright (C) 2011 Google, Inc. 
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:		the actual misc device
+ * @buffers:		an rb tree of all the existing buffers
+ * @buffer_lock:	lock protecting the tree of buffers
+ * @lock:		rwsem protecting the tree of heaps and clients
+ * @heaps:		list of all the heaps in the system
+ * @clients:		an rb tree of all the existing clients
+ */
+struct ion_device {
+	struct miscdevice dev;
+	struct rb_root buffers;
+	struct mutex buffer_lock;
+	struct rw_semaphore lock;
+	struct plist_head heaps;
+	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
+			     unsigned long arg);
+	struct rb_root clients;
+	struct dentry *debug_root;
+	struct dentry *heaps_debug_root;
+	struct dentry *clients_debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node:		node in the tree of all clients
+ * @dev:		backpointer to ion device
+ * @handles:		an rb tree of all the handles in this client
+ * @idr:		an idr space for allocating handle ids
+ * @lock:		lock protecting the tree of handles
+ * @name:		used for debugging
+ * @display_name:	used for debugging (unique version of @name)
+ * @display_serial:	used for debugging (to make display_name unique)
+ * @task:		used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the handles tree
+ * as well as the handles themselves, and should be held while modifying
+ * either.
+ */
+struct ion_client {
+	struct rb_node node;
+	struct ion_device *dev;
+	struct rb_root handles;
+	struct idr idr;
+	struct mutex lock;
+	const char *name;
+	char *display_name;
+	int display_serial;
+	struct task_struct *task;
+	pid_t pid;
+	struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref:		reference count
+ * @client:		back pointer to the client the buffer resides in
+ * @buffer:		pointer to the buffer
+ * @node:		node in the client's handle rbtree
+ * @kmap_cnt:		count of times this client has mapped to kernel
+ * @id:			client-unique id allocated by client->idr
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client. Other fields are never changed after initialization.
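+ *
+ * A handle holds a single reference on its buffer for its lifetime; the
+ * handle id is allocated from @client->idr and is the value userspace
+ * passes across the ioctl boundary.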
+ */ +struct ion_handle { + struct kref ref; + struct ion_client *client; + struct ion_buffer *buffer; + struct rb_node node; + unsigned int kmap_cnt; + int id; +}; + +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer) +{ + return (buffer->flags & ION_FLAG_CACHED) && + !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC); +} + +bool ion_buffer_cached(struct ion_buffer *buffer) +{ + return !!(buffer->flags & ION_FLAG_CACHED); +} + +static inline struct page *ion_buffer_page(struct page *page) +{ + return (struct page *)((unsigned long)page & ~(1UL)); +} + +static inline bool ion_buffer_page_is_dirty(struct page *page) +{ + return !!((unsigned long)page & 1UL); +} + +static inline void ion_buffer_page_dirty(struct page **page) +{ + *page = (struct page *)((unsigned long)(*page) | 1UL); +} + +static inline void ion_buffer_page_clean(struct page **page) +{ + *page = (struct page *)((unsigned long)(*page) & ~(1UL)); +} + +/* this function should only be called while dev->lock is held */ +static void ion_buffer_add(struct ion_device *dev, + struct ion_buffer *buffer) +{ + struct rb_node **p = &dev->buffers.rb_node; + struct rb_node *parent = NULL; + struct ion_buffer *entry; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_buffer, node); + + if (buffer < entry) { + p = &(*p)->rb_left; + } else if (buffer > entry) { + p = &(*p)->rb_right; + } else { + pr_err("%s: buffer already found.", __func__); + BUG(); + } + } + + rb_link_node(&buffer->node, parent, p); + rb_insert_color(&buffer->node, &dev->buffers); +} + +/* this function should only be called while dev->lock is held */ +static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, + struct ion_device *dev, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + struct ion_buffer *buffer; + struct sg_table *table; + struct scatterlist *sg; + int i, ret; + + buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + buffer->heap = heap; + buffer->flags = flags; + kref_init(&buffer->ref); + + ret = heap->ops->allocate(heap, buffer, len, align, flags); + + if (ret) { + if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) + goto err2; + + ion_heap_freelist_drain(heap, 0); + ret = heap->ops->allocate(heap, buffer, len, align, + flags); + if (ret) + goto err2; + } + + buffer->dev = dev; + buffer->size = len; + + table = heap->ops->map_dma(heap, buffer); + if (WARN_ONCE(table == NULL, + "heap->ops->map_dma should return ERR_PTR on error")) + table = ERR_PTR(-EINVAL); + if (IS_ERR(table)) { + heap->ops->free(buffer); + kfree(buffer); + return ERR_CAST(table); + } + buffer->sg_table = table; + if (ion_buffer_fault_user_mappings(buffer)) { + int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + struct scatterlist *sg; + int i, j, k = 0; + + buffer->pages = vmalloc(sizeof(struct page *) * num_pages); + if (!buffer->pages) { + ret = -ENOMEM; + goto err1; + } + + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + + for (j = 0; j < sg->length / PAGE_SIZE; j++) + buffer->pages[k++] = page++; + } + + if (ret) + goto err; + } + + buffer->dev = dev; + buffer->size = len; + INIT_LIST_HEAD(&buffer->vmas); + mutex_init(&buffer->lock); + /* this will set up dma addresses for the sglist -- it is not + technically correct as per the dma api -- a specific + device isn't really taking ownership here. However, in practice on + our systems the only dma_address space is physical addresses. 
+ Additionally, we can't afford the overhead of invalidating every + allocation via dma_map_sg. The implicit contract here is that + memory coming from the heaps is ready for dma, ie if it has a + cached mapping that mapping has been invalidated */ + for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) + sg_dma_address(sg) = sg_phys(sg); + mutex_lock(&dev->buffer_lock); + ion_buffer_add(dev, buffer); + mutex_unlock(&dev->buffer_lock); + return buffer; + +err: + heap->ops->unmap_dma(heap, buffer); + heap->ops->free(buffer); +err1: + vfree(buffer->pages); +err2: + kfree(buffer); + return ERR_PTR(ret); +} + +void ion_buffer_destroy(struct ion_buffer *buffer) +{ + if (WARN_ON(buffer->kmap_cnt > 0)) + buffer->heap->ops->unmap_kernel(buffer->heap, buffer); + buffer->heap->ops->unmap_dma(buffer->heap, buffer); + buffer->heap->ops->free(buffer); + vfree(buffer->pages); + kfree(buffer); +} + +static void _ion_buffer_destroy(struct kref *kref) +{ + struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); + struct ion_heap *heap = buffer->heap; + struct ion_device *dev = buffer->dev; + + mutex_lock(&dev->buffer_lock); + rb_erase(&buffer->node, &dev->buffers); + mutex_unlock(&dev->buffer_lock); + + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + ion_heap_freelist_add(heap, buffer); + else + ion_buffer_destroy(buffer); +} + +static void ion_buffer_get(struct ion_buffer *buffer) +{ + kref_get(&buffer->ref); +} + +static int ion_buffer_put(struct ion_buffer *buffer) +{ + return kref_put(&buffer->ref, _ion_buffer_destroy); +} + +static void ion_buffer_add_to_handle(struct ion_buffer *buffer) +{ + mutex_lock(&buffer->lock); + buffer->handle_count++; + mutex_unlock(&buffer->lock); +} + +static void ion_buffer_remove_from_handle(struct ion_buffer *buffer) +{ + /* + * when a buffer is removed from a handle, if it is not in + * any other handles, copy the taskcomm and the pid of the + * process it's being removed from into the buffer. At this + * point there will be no way to track what processes this buffer is + * being used by, it only exists as a dma_buf file descriptor. 
+ * The taskcomm and pid can provide a debug hint as to where this fd + * is in the system + */ + mutex_lock(&buffer->lock); + buffer->handle_count--; + BUG_ON(buffer->handle_count < 0); + if (!buffer->handle_count) { + struct task_struct *task; + + task = current->group_leader; + get_task_comm(buffer->task_comm, task); + buffer->pid = task_pid_nr(task); + } + mutex_unlock(&buffer->lock); +} + +static struct ion_handle *ion_handle_create(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct ion_handle *handle; + + handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); + if (!handle) + return ERR_PTR(-ENOMEM); + kref_init(&handle->ref); + RB_CLEAR_NODE(&handle->node); + handle->client = client; + ion_buffer_get(buffer); + ion_buffer_add_to_handle(buffer); + handle->buffer = buffer; + + return handle; +} + +static void ion_handle_kmap_put(struct ion_handle *); + +static void ion_handle_destroy(struct kref *kref) +{ + struct ion_handle *handle = container_of(kref, struct ion_handle, ref); + struct ion_client *client = handle->client; + struct ion_buffer *buffer = handle->buffer; + + mutex_lock(&buffer->lock); + while (handle->kmap_cnt) + ion_handle_kmap_put(handle); + mutex_unlock(&buffer->lock); + + idr_remove(&client->idr, handle->id); + if (!RB_EMPTY_NODE(&handle->node)) + rb_erase(&handle->node, &client->handles); + + ion_buffer_remove_from_handle(buffer); + ion_buffer_put(buffer); + + kfree(handle); +} + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) +{ + return handle->buffer; +} + +static void ion_handle_get(struct ion_handle *handle) +{ + kref_get(&handle->ref); +} + +static int ion_handle_put(struct ion_handle *handle) +{ + struct ion_client *client = handle->client; + int ret; + + mutex_lock(&client->lock); + ret = kref_put(&handle->ref, ion_handle_destroy); + mutex_unlock(&client->lock); + + return ret; +} + +static struct ion_handle *ion_handle_lookup(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct rb_node *n = client->handles.rb_node; + + while (n) { + struct ion_handle *entry = rb_entry(n, struct ion_handle, node); + + if (buffer < entry->buffer) + n = n->rb_left; + else if (buffer > entry->buffer) + n = n->rb_right; + else + return entry; + } + return ERR_PTR(-EINVAL); +} + +static struct ion_handle *ion_handle_get_by_id(struct ion_client *client, + int id) +{ + struct ion_handle *handle; + + mutex_lock(&client->lock); + handle = idr_find(&client->idr, id); + if (handle) + ion_handle_get(handle); + mutex_unlock(&client->lock); + + return handle ? 
handle : ERR_PTR(-EINVAL); +} + +static bool ion_handle_validate(struct ion_client *client, + struct ion_handle *handle) +{ + WARN_ON(!mutex_is_locked(&client->lock)); + return idr_find(&client->idr, handle->id) == handle; +} + +static int ion_handle_add(struct ion_client *client, struct ion_handle *handle) +{ + int id; + struct rb_node **p = &client->handles.rb_node; + struct rb_node *parent = NULL; + struct ion_handle *entry; + + id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL); + if (id < 0) + return id; + + handle->id = id; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_handle, node); + + if (handle->buffer < entry->buffer) + p = &(*p)->rb_left; + else if (handle->buffer > entry->buffer) + p = &(*p)->rb_right; + else + WARN(1, "%s: buffer already found.", __func__); + } + + rb_link_node(&handle->node, parent, p); + rb_insert_color(&handle->node, &client->handles); + + return 0; +} + +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int heap_id_mask, + unsigned int flags) +{ + struct ion_handle *handle; + struct ion_device *dev = client->dev; + struct ion_buffer *buffer = NULL; + struct ion_heap *heap; + int ret; + + pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__, + len, align, heap_id_mask, flags); + /* + * traverse the list of heaps available in this system in priority + * order. If the heap type is supported by the client, and matches the + * request of the caller allocate from it. Repeat until allocate has + * succeeded or all heaps have been tried + */ + len = PAGE_ALIGN(len); + + if (!len) + return ERR_PTR(-EINVAL); + + down_read(&dev->lock); + plist_for_each_entry(heap, &dev->heaps, node) { + /* if the caller didn't specify this heap id */ + if (!((1 << heap->id) & heap_id_mask)) + continue; + buffer = ion_buffer_create(heap, dev, len, align, flags); + if (!IS_ERR(buffer)) + break; + } + up_read(&dev->lock); + + if (buffer == NULL) + return ERR_PTR(-ENODEV); + + if (IS_ERR(buffer)) + return ERR_CAST(buffer); + + handle = ion_handle_create(client, buffer); + + /* + * ion_buffer_create will create a buffer with a ref_cnt of 1, + * and ion_handle_create will take a second reference, drop one here + */ + ion_buffer_put(buffer); + + if (IS_ERR(handle)) + return handle; + + mutex_lock(&client->lock); + ret = ion_handle_add(client, handle); + mutex_unlock(&client->lock); + if (ret) { + ion_handle_put(handle); + handle = ERR_PTR(ret); + } + + return handle; +} +EXPORT_SYMBOL(ion_alloc); + +void ion_free(struct ion_client *client, struct ion_handle *handle) +{ + bool valid_handle; + + BUG_ON(client != handle->client); + + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + + if (!valid_handle) { + WARN(1, "%s: invalid handle passed to free.\n", __func__); + mutex_unlock(&client->lock); + return; + } + mutex_unlock(&client->lock); + ion_handle_put(handle); +} +EXPORT_SYMBOL(ion_free); + +int ion_phys(struct ion_client *client, struct ion_handle *handle, + ion_phys_addr_t *addr, size_t *len) +{ + struct ion_buffer *buffer; + int ret; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + mutex_unlock(&client->lock); + return -EINVAL; + } + + buffer = handle->buffer; + + if (!buffer->heap->ops->phys) { + pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n", + __func__, buffer->heap->name, buffer->heap->type); + mutex_unlock(&client->lock); + return -ENODEV; + } + mutex_unlock(&client->lock); + ret = 
buffer->heap->ops->phys(buffer->heap, buffer, addr, len); + return ret; +} +EXPORT_SYMBOL(ion_phys); + +static void *ion_buffer_kmap_get(struct ion_buffer *buffer) +{ + void *vaddr; + + if (buffer->kmap_cnt) { + buffer->kmap_cnt++; + return buffer->vaddr; + } + vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); + if (WARN_ONCE(vaddr == NULL, + "heap->ops->map_kernel should return ERR_PTR on error")) + return ERR_PTR(-EINVAL); + if (IS_ERR(vaddr)) + return vaddr; + buffer->vaddr = vaddr; + buffer->kmap_cnt++; + return vaddr; +} + +static void *ion_handle_kmap_get(struct ion_handle *handle) +{ + struct ion_buffer *buffer = handle->buffer; + void *vaddr; + + if (handle->kmap_cnt) { + handle->kmap_cnt++; + return buffer->vaddr; + } + vaddr = ion_buffer_kmap_get(buffer); + if (IS_ERR(vaddr)) + return vaddr; + handle->kmap_cnt++; + return vaddr; +} + +static void ion_buffer_kmap_put(struct ion_buffer *buffer) +{ + buffer->kmap_cnt--; + if (!buffer->kmap_cnt) { + buffer->heap->ops->unmap_kernel(buffer->heap, buffer); + buffer->vaddr = NULL; + } +} + +static void ion_handle_kmap_put(struct ion_handle *handle) +{ + struct ion_buffer *buffer = handle->buffer; + + if (!handle->kmap_cnt) { + WARN(1, "%s: Double unmap detected! bailing...\n", __func__); + return; + } + handle->kmap_cnt--; + if (!handle->kmap_cnt) + ion_buffer_kmap_put(buffer); +} + +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + void *vaddr; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to map_kernel.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + + buffer = handle->buffer; + + if (!handle->buffer->heap->ops->map_kernel) { + pr_err("%s: map_kernel is not implemented by this heap.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-ENODEV); + } + + mutex_lock(&buffer->lock); + vaddr = ion_handle_kmap_get(handle); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return vaddr; +} +EXPORT_SYMBOL(ion_map_kernel); + +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + buffer = handle->buffer; + mutex_lock(&buffer->lock); + ion_handle_kmap_put(handle); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); +} +EXPORT_SYMBOL(ion_unmap_kernel); + +static int ion_debug_client_show(struct seq_file *s, void *unused) +{ + struct ion_client *client = s->private; + struct rb_node *n; + size_t sizes[ION_NUM_HEAP_IDS] = {0}; + const char *names[ION_NUM_HEAP_IDS] = {NULL}; + int i; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + unsigned int id = handle->buffer->heap->id; + + if (!names[id]) + names[id] = handle->buffer->heap->name; + sizes[id] += handle->buffer->size; + } + mutex_unlock(&client->lock); + + seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); + for (i = 0; i < ION_NUM_HEAP_IDS; i++) { + if (!names[i]) + continue; + seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]); + } + return 0; +} + +static int ion_debug_client_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_client_show, inode->i_private); +} + +static const struct file_operations debug_client_fops = { + .open = ion_debug_client_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int 
ion_get_client_serial(const struct rb_root *root, + const unsigned char *name) +{ + int serial = -1; + struct rb_node *node; + + for (node = rb_first(root); node; node = rb_next(node)) { + struct ion_client *client = rb_entry(node, struct ion_client, + node); + + if (strcmp(client->name, name)) + continue; + serial = max(serial, client->display_serial); + } + return serial + 1; +} + +struct ion_client *ion_client_create(struct ion_device *dev, + const char *name) +{ + struct ion_client *client; + struct task_struct *task; + struct rb_node **p; + struct rb_node *parent = NULL; + struct ion_client *entry; + pid_t pid; + + if (!name) { + pr_err("%s: Name cannot be null\n", __func__); + return ERR_PTR(-EINVAL); + } + + get_task_struct(current->group_leader); + task_lock(current->group_leader); + pid = task_pid_nr(current->group_leader); + /* don't bother to store task struct for kernel threads, + they can't be killed anyway */ + if (current->group_leader->flags & PF_KTHREAD) { + put_task_struct(current->group_leader); + task = NULL; + } else { + task = current->group_leader; + } + task_unlock(current->group_leader); + + client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); + if (!client) + goto err_put_task_struct; + + client->dev = dev; + client->handles = RB_ROOT; + idr_init(&client->idr); + mutex_init(&client->lock); + client->task = task; + client->pid = pid; + client->name = kstrdup(name, GFP_KERNEL); + if (!client->name) + goto err_free_client; + + down_write(&dev->lock); + client->display_serial = ion_get_client_serial(&dev->clients, name); + client->display_name = kasprintf( + GFP_KERNEL, "%s-%d", name, client->display_serial); + if (!client->display_name) { + up_write(&dev->lock); + goto err_free_client_name; + } + p = &dev->clients.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_client, node); + + if (client < entry) + p = &(*p)->rb_left; + else if (client > entry) + p = &(*p)->rb_right; + } + rb_link_node(&client->node, parent, p); + rb_insert_color(&client->node, &dev->clients); + + client->debug_root = debugfs_create_file(client->display_name, 0664, + dev->clients_debug_root, + client, &debug_client_fops); + if (!client->debug_root) { + char buf[256], *path; + + path = dentry_path(dev->clients_debug_root, buf, 256); + pr_err("Failed to create client debugfs at %s/%s\n", + path, client->display_name); + } + + up_write(&dev->lock); + + return client; + +err_free_client_name: + kfree(client->name); +err_free_client: + kfree(client); +err_put_task_struct: + if (task) + put_task_struct(current->group_leader); + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL(ion_client_create); + +void ion_client_destroy(struct ion_client *client) +{ + struct ion_device *dev = client->dev; + struct rb_node *n; + + pr_debug("%s: %d\n", __func__, __LINE__); + while ((n = rb_first(&client->handles))) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + ion_handle_destroy(&handle->ref); + } + + idr_destroy(&client->idr); + + down_write(&dev->lock); + if (client->task) + put_task_struct(client->task); + rb_erase(&client->node, &dev->clients); + debugfs_remove_recursive(client->debug_root); + up_write(&dev->lock); + + kfree(client->display_name); + kfree(client->name); + kfree(client); +} +EXPORT_SYMBOL(ion_client_destroy); + +struct sg_table *ion_sg_table(struct ion_client *client, + struct ion_handle *handle) +{ + struct ion_buffer *buffer; + struct sg_table *table; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: 
invalid handle passed to map_dma.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + buffer = handle->buffer; + table = buffer->sg_table; + mutex_unlock(&client->lock); + return table; +} +EXPORT_SYMBOL(ion_sg_table); + +static void ion_buffer_sync_for_device(struct ion_buffer *buffer, + struct device *dev, + enum dma_data_direction direction); + +static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_buf *dmabuf = attachment->dmabuf; + struct ion_buffer *buffer = dmabuf->priv; + + ion_buffer_sync_for_device(buffer, attachment->dev, direction); + return buffer->sg_table; +} + +static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ +} + +void ion_pages_sync_for_device(struct device *dev, struct page *page, + size_t size, enum dma_data_direction dir) +{ + struct scatterlist sg; + + sg_init_table(&sg, 1); + sg_set_page(&sg, page, size, 0); + /* + * This is not correct - sg_dma_address needs a dma_addr_t that is valid + * for the targeted device, but this works on the currently targeted + * hardware. + */ + sg_dma_address(&sg) = page_to_phys(page); + dma_sync_sg_for_device(dev, &sg, 1, dir); +} + +struct ion_vma_list { + struct list_head list; + struct vm_area_struct *vma; +}; + +static void ion_buffer_sync_for_device(struct ion_buffer *buffer, + struct device *dev, + enum dma_data_direction dir) +{ + struct ion_vma_list *vma_list; + int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + int i; + + pr_debug("%s: syncing for device %s\n", __func__, + dev ? dev_name(dev) : "null"); + + if (!ion_buffer_fault_user_mappings(buffer)) + return; + + mutex_lock(&buffer->lock); + for (i = 0; i < pages; i++) { + struct page *page = buffer->pages[i]; + + if (ion_buffer_page_is_dirty(page)) + ion_pages_sync_for_device(dev, ion_buffer_page(page), + PAGE_SIZE, dir); + + ion_buffer_page_clean(buffer->pages + i); + } + list_for_each_entry(vma_list, &buffer->vmas, list) { + struct vm_area_struct *vma = vma_list->vma; + + zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, + NULL); + } + mutex_unlock(&buffer->lock); +} + +static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct ion_buffer *buffer = vma->vm_private_data; + unsigned long pfn; + int ret; + + mutex_lock(&buffer->lock); + ion_buffer_page_dirty(buffer->pages + vmf->pgoff); + BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); + + pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); + ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + mutex_unlock(&buffer->lock); + if (ret) + return VM_FAULT_ERROR; + + return VM_FAULT_NOPAGE; +} + +static void ion_vm_open(struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = vma->vm_private_data; + struct ion_vma_list *vma_list; + + vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL); + if (!vma_list) + return; + vma_list->vma = vma; + mutex_lock(&buffer->lock); + list_add(&vma_list->list, &buffer->vmas); + mutex_unlock(&buffer->lock); + pr_debug("%s: adding %p\n", __func__, vma); +} + +static void ion_vm_close(struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = vma->vm_private_data; + struct ion_vma_list *vma_list, *tmp; + + pr_debug("%s\n", __func__); + mutex_lock(&buffer->lock); + list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { + if (vma_list->vma != vma) + continue; + list_del(&vma_list->list); + kfree(vma_list); + 
pr_debug("%s: deleting %p\n", __func__, vma); + break; + } + mutex_unlock(&buffer->lock); +} + +static struct vm_operations_struct ion_vma_ops = { + .open = ion_vm_open, + .close = ion_vm_close, + .fault = ion_vm_fault, +}; + +static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = dmabuf->priv; + int ret = 0; + + if (!buffer->heap->ops->map_user) { + pr_err("%s: this heap does not define a method for mapping to userspace\n", + __func__); + return -EINVAL; + } + + if (ion_buffer_fault_user_mappings(buffer)) { + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | + VM_DONTDUMP; + vma->vm_private_data = buffer; + vma->vm_ops = &ion_vma_ops; + ion_vm_open(vma); + return 0; + } + + if (!(buffer->flags & ION_FLAG_CACHED)) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + mutex_lock(&buffer->lock); + /* now map it to userspace */ + ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); + mutex_unlock(&buffer->lock); + + if (ret) + pr_err("%s: failure mapping buffer to userspace\n", + __func__); + + return ret; +} + +static void ion_dma_buf_release(struct dma_buf *dmabuf) +{ + struct ion_buffer *buffer = dmabuf->priv; + + ion_buffer_put(buffer); +} + +static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) +{ + struct ion_buffer *buffer = dmabuf->priv; + + return buffer->vaddr + offset * PAGE_SIZE; +} + +static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, + void *ptr) +{ +} + +static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, + size_t len, + enum dma_data_direction direction) +{ + struct ion_buffer *buffer = dmabuf->priv; + void *vaddr; + + if (!buffer->heap->ops->map_kernel) { + pr_err("%s: map kernel is not implemented by this heap.\n", + __func__); + return -ENODEV; + } + + mutex_lock(&buffer->lock); + vaddr = ion_buffer_kmap_get(buffer); + mutex_unlock(&buffer->lock); + return PTR_ERR_OR_ZERO(vaddr); +} + +static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, + size_t len, + enum dma_data_direction direction) +{ + struct ion_buffer *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + ion_buffer_kmap_put(buffer); + mutex_unlock(&buffer->lock); +} + +static struct dma_buf_ops dma_buf_ops = { + .map_dma_buf = ion_map_dma_buf, + .unmap_dma_buf = ion_unmap_dma_buf, + .mmap = ion_mmap, + .release = ion_dma_buf_release, + .begin_cpu_access = ion_dma_buf_begin_cpu_access, + .end_cpu_access = ion_dma_buf_end_cpu_access, + .kmap_atomic = ion_dma_buf_kmap, + .kunmap_atomic = ion_dma_buf_kunmap, + .kmap = ion_dma_buf_kmap, + .kunmap = ion_dma_buf_kunmap, +}; + +struct dma_buf *ion_share_dma_buf(struct ion_client *client, + struct ion_handle *handle) +{ + struct ion_buffer *buffer; + struct dma_buf *dmabuf; + bool valid_handle; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + if (!valid_handle) { + WARN(1, "%s: invalid handle passed to share.\n", __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + buffer = handle->buffer; + ion_buffer_get(buffer); + mutex_unlock(&client->lock); + + exp_info.ops = &dma_buf_ops; + exp_info.size = buffer->size; + exp_info.flags = O_RDWR; + exp_info.priv = buffer; + + dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(dmabuf)) { + ion_buffer_put(buffer); + return dmabuf; + } + + return dmabuf; +} +EXPORT_SYMBOL(ion_share_dma_buf); + +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle 
*handle) +{ + struct dma_buf *dmabuf; + int fd; + + dmabuf = ion_share_dma_buf(client, handle); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + fd = dma_buf_fd(dmabuf, O_CLOEXEC); + if (fd < 0) + dma_buf_put(dmabuf); + + return fd; +} +EXPORT_SYMBOL(ion_share_dma_buf_fd); + +struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) +{ + struct dma_buf *dmabuf; + struct ion_buffer *buffer; + struct ion_handle *handle; + int ret; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return ERR_CAST(dmabuf); + /* if this memory came from ion */ + + if (dmabuf->ops != &dma_buf_ops) { + pr_err("%s: can not import dmabuf from another exporter\n", + __func__); + dma_buf_put(dmabuf); + return ERR_PTR(-EINVAL); + } + buffer = dmabuf->priv; + + mutex_lock(&client->lock); + /* if a handle exists for this buffer just take a reference to it */ + handle = ion_handle_lookup(client, buffer); + if (!IS_ERR(handle)) { + ion_handle_get(handle); + mutex_unlock(&client->lock); + goto end; + } + mutex_unlock(&client->lock); + + handle = ion_handle_create(client, buffer); + if (IS_ERR(handle)) + goto end; + + mutex_lock(&client->lock); + ret = ion_handle_add(client, handle); + mutex_unlock(&client->lock); + if (ret) { + ion_handle_put(handle); + handle = ERR_PTR(ret); + } + +end: + dma_buf_put(dmabuf); + return handle; +} +EXPORT_SYMBOL(ion_import_dma_buf); + +static int ion_sync_for_device(struct ion_client *client, int fd) +{ + struct dma_buf *dmabuf; + struct ion_buffer *buffer; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + /* if this memory came from ion */ + if (dmabuf->ops != &dma_buf_ops) { + pr_err("%s: can not sync dmabuf from another exporter\n", + __func__); + dma_buf_put(dmabuf); + return -EINVAL; + } + buffer = dmabuf->priv; + + dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, + buffer->sg_table->nents, DMA_BIDIRECTIONAL); + dma_buf_put(dmabuf); + return 0; +} + +/* fix up the cases where the ioctl direction bits are incorrect */ +static unsigned int ion_ioctl_dir(unsigned int cmd) +{ + switch (cmd) { + case ION_IOC_SYNC: + case ION_IOC_FREE: + case ION_IOC_CUSTOM: + return _IOC_WRITE; + default: + return _IOC_DIR(cmd); + } +} + +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct ion_client *client = filp->private_data; + struct ion_device *dev = client->dev; + struct ion_handle *cleanup_handle = NULL; + int ret = 0; + unsigned int dir; + + union { + struct ion_fd_data fd; + struct ion_allocation_data allocation; + struct ion_handle_data handle; + struct ion_custom_data custom; + } data; + + dir = ion_ioctl_dir(cmd); + + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + if (dir & _IOC_WRITE) + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + switch (cmd) { + case ION_IOC_ALLOC: + { + struct ion_handle *handle; + + handle = ion_alloc(client, data.allocation.len, + data.allocation.align, + data.allocation.heap_id_mask, + data.allocation.flags); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + data.allocation.handle = handle->id; + + cleanup_handle = handle; + break; + } + case ION_IOC_FREE: + { + struct ion_handle *handle; + + handle = ion_handle_get_by_id(client, data.handle.handle); + if (IS_ERR(handle)) + return PTR_ERR(handle); + ion_free(client, handle); + ion_handle_put(handle); + break; + } + case ION_IOC_SHARE: + case ION_IOC_MAP: + { + struct ion_handle *handle; + + handle = ion_handle_get_by_id(client, data.handle.handle); + if (IS_ERR(handle)) + 
return PTR_ERR(handle); + data.fd.fd = ion_share_dma_buf_fd(client, handle); + ion_handle_put(handle); + if (data.fd.fd < 0) + ret = data.fd.fd; + break; + } + case ION_IOC_IMPORT: + { + struct ion_handle *handle; + + handle = ion_import_dma_buf(client, data.fd.fd); + if (IS_ERR(handle)) + ret = PTR_ERR(handle); + else + data.handle.handle = handle->id; + break; + } + case ION_IOC_SYNC: + { + ret = ion_sync_for_device(client, data.fd.fd); + break; + } + case ION_IOC_CUSTOM: + { + if (!dev->custom_ioctl) + return -ENOTTY; + ret = dev->custom_ioctl(client, data.custom.cmd, + data.custom.arg); + break; + } + default: + return -ENOTTY; + } + + if (dir & _IOC_READ) { + if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) { + if (cleanup_handle) + ion_free(client, cleanup_handle); + return -EFAULT; + } + } + return ret; +} + +static int ion_release(struct inode *inode, struct file *file) +{ + struct ion_client *client = file->private_data; + + pr_debug("%s: %d\n", __func__, __LINE__); + ion_client_destroy(client); + return 0; +} + +static int ion_open(struct inode *inode, struct file *file) +{ + struct miscdevice *miscdev = file->private_data; + struct ion_device *dev = container_of(miscdev, struct ion_device, dev); + struct ion_client *client; + char debug_name[64]; + + pr_debug("%s: %d\n", __func__, __LINE__); + snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader)); + client = ion_client_create(dev, debug_name); + if (IS_ERR(client)) + return PTR_ERR(client); + file->private_data = client; + + return 0; +} + +static const struct file_operations ion_fops = { + .owner = THIS_MODULE, + .open = ion_open, + .release = ion_release, + .unlocked_ioctl = ion_ioctl, + .compat_ioctl = compat_ion_ioctl, +}; + +static size_t ion_debug_heap_total(struct ion_client *client, + unsigned int id) +{ + size_t size = 0; + struct rb_node *n; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, + struct ion_handle, + node); + if (handle->buffer->heap->id == id) + size += handle->buffer->size; + } + mutex_unlock(&client->lock); + return size; +} + +static int ion_debug_heap_show(struct seq_file *s, void *unused) +{ + struct ion_heap *heap = s->private; + struct ion_device *dev = heap->dev; + struct rb_node *n; + size_t total_size = 0; + size_t total_orphaned_size = 0; + + seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size"); + seq_puts(s, "----------------------------------------------------\n"); + + for (n = rb_first(&dev->clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + size_t size = ion_debug_heap_total(client, heap->id); + + if (!size) + continue; + if (client->task) { + char task_comm[TASK_COMM_LEN]; + + get_task_comm(task_comm, client->task); + seq_printf(s, "%16s %16u %16zu\n", task_comm, + client->pid, size); + } else { + seq_printf(s, "%16s %16u %16zu\n", client->name, + client->pid, size); + } + } + seq_puts(s, "----------------------------------------------------\n"); + seq_puts(s, "orphaned allocations (info is from last known client):\n"); + mutex_lock(&dev->buffer_lock); + for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { + struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, + node); + if (buffer->heap->id != heap->id) + continue; + total_size += buffer->size; + if (!buffer->handle_count) { + seq_printf(s, "%16s %16u %16zu %d %d\n", + buffer->task_comm, buffer->pid, + buffer->size, buffer->kmap_cnt, + 
atomic_read(&buffer->ref.refcount)); + total_orphaned_size += buffer->size; + } + } + mutex_unlock(&dev->buffer_lock); + seq_puts(s, "----------------------------------------------------\n"); + seq_printf(s, "%16s %16zu\n", "total orphaned", + total_orphaned_size); + seq_printf(s, "%16s %16zu\n", "total ", total_size); + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + seq_printf(s, "%16s %16zu\n", "deferred free", + heap->free_list_size); + seq_puts(s, "----------------------------------------------------\n"); + + if (heap->debug_show) + heap->debug_show(heap, s, unused); + + return 0; +} + +static int ion_debug_heap_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_heap_show, inode->i_private); +} + +static const struct file_operations debug_heap_fops = { + .open = ion_debug_heap_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#ifdef DEBUG_HEAP_SHRINKER +static int debug_shrink_set(void *data, u64 val) +{ + struct ion_heap *heap = data; + struct shrink_control sc; + int objs; + + sc.gfp_mask = -1; + sc.nr_to_scan = 0; + + if (!val) + return 0; + + objs = heap->shrinker.shrink(&heap->shrinker, &sc); + sc.nr_to_scan = objs; + + heap->shrinker.shrink(&heap->shrinker, &sc); + return 0; +} + +static int debug_shrink_get(void *data, u64 *val) +{ + struct ion_heap *heap = data; + struct shrink_control sc; + int objs; + + sc.gfp_mask = -1; + sc.nr_to_scan = 0; + + objs = heap->shrinker.shrink(&heap->shrinker, &sc); + *val = objs; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, + debug_shrink_set, "%llu\n"); +#endif + +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) +{ + struct dentry *debug_file; + + if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || + !heap->ops->unmap_dma) + pr_err("%s: can not add heap with invalid ops struct.\n", + __func__); + + spin_lock_init(&heap->free_lock); + heap->free_list_size = 0; + + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + ion_heap_init_deferred_free(heap); + + if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) + ion_heap_init_shrinker(heap); + + heap->dev = dev; + down_write(&dev->lock); + /* use negative heap->id to reverse the priority -- when traversing + the list later attempt higher id numbers first */ + plist_node_init(&heap->node, -heap->id); + plist_add(&heap->node, &dev->heaps); + debug_file = debugfs_create_file(heap->name, 0664, + dev->heaps_debug_root, heap, + &debug_heap_fops); + + if (!debug_file) { + char buf[256], *path; + + path = dentry_path(dev->heaps_debug_root, buf, 256); + pr_err("Failed to create heap debugfs at %s/%s\n", + path, heap->name); + } + +#ifdef DEBUG_HEAP_SHRINKER + if (heap->shrinker.shrink) { + char debug_name[64]; + + snprintf(debug_name, 64, "%s_shrink", heap->name); + debug_file = debugfs_create_file( + debug_name, 0644, dev->heaps_debug_root, heap, + &debug_shrink_fops); + if (!debug_file) { + char buf[256], *path; + + path = dentry_path(dev->heaps_debug_root, buf, 256); + pr_err("Failed to create heap shrinker debugfs at %s/%s\n", + path, debug_name); + } + } +#endif + up_write(&dev->lock); +} + +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)) +{ + struct ion_device *idev; + int ret; + + idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); + if (!idev) + return ERR_PTR(-ENOMEM); + + idev->dev.minor = MISC_DYNAMIC_MINOR; + idev->dev.name = "ion"; + idev->dev.fops = &ion_fops; + 
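/* ion is exposed as a dynamically-numbered misc character device (/dev/ion); clients are created on open() and reach the allocator through its ioctl interface */ +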
idev->dev.parent = NULL; + ret = misc_register(&idev->dev); + if (ret) { + pr_err("ion: failed to register misc device.\n"); + return ERR_PTR(ret); + } + + idev->debug_root = debugfs_create_dir("ion", NULL); + if (!idev->debug_root) { + pr_err("ion: failed to create debugfs root directory.\n"); + goto debugfs_done; + } + idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root); + if (!idev->heaps_debug_root) { + pr_err("ion: failed to create debugfs heaps directory.\n"); + goto debugfs_done; + } + idev->clients_debug_root = debugfs_create_dir("clients", + idev->debug_root); + if (!idev->clients_debug_root) + pr_err("ion: failed to create debugfs clients directory.\n"); + +debugfs_done: + + idev->custom_ioctl = custom_ioctl; + idev->buffers = RB_ROOT; + mutex_init(&idev->buffer_lock); + init_rwsem(&idev->lock); + plist_head_init(&idev->heaps); + idev->clients = RB_ROOT; + return idev; +} + +void ion_device_destroy(struct ion_device *dev) +{ + misc_deregister(&dev->dev); + debugfs_remove_recursive(dev->debug_root); + /* XXX need to free the heaps and clients ? */ + kfree(dev); +} + +void __init ion_reserve(struct ion_platform_data *data) +{ + int i; + + for (i = 0; i < data->nr; i++) { + if (data->heaps[i].size == 0) + continue; + + if (data->heaps[i].base == 0) { + phys_addr_t paddr; + + paddr = memblock_alloc_base(data->heaps[i].size, + data->heaps[i].align, + MEMBLOCK_ALLOC_ANYWHERE); + if (!paddr) { + pr_err("%s: error allocating memblock for heap %d\n", + __func__, i); + continue; + } + data->heaps[i].base = paddr; + } else { + int ret = memblock_reserve(data->heaps[i].base, + data->heaps[i].size); + if (ret) + pr_err("memblock reserve of %zx@%lx failed\n", + data->heaps[i].size, + data->heaps[i].base); + } + pr_info("%s: %s reserved base %lx size %zu\n", __func__, + data->heaps[i].name, + data->heaps[i].base, + data->heaps[i].size); + } +} diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h new file mode 100644 index 000000000..443db8459 --- /dev/null +++ b/drivers/staging/android/ion/ion.h @@ -0,0 +1,203 @@ +/* + * drivers/staging/android/ion/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_ION_H +#define _LINUX_ION_H + +#include <linux/types.h> + +#include "../uapi/ion.h" + +struct ion_handle; +struct ion_device; +struct ion_heap; +struct ion_mapper; +struct ion_client; +struct ion_buffer; + +/* This should be removed some day when phys_addr_t's are fully + plumbed in the kernel, and all instances of ion_phys_addr_t should + be converted to phys_addr_t. For the time being many kernel interfaces + still take an unsigned long rather than a phys_addr_t */ +#define ion_phys_addr_t unsigned long + +/** + * struct ion_platform_heap - defines a heap in the given platform + * @type: type of the heap from ion_heap_type enum + * @id: unique identifier for the heap. When allocating, heaps with + * higher ids are tried first. At allocation these are passed + * as a bit mask and therefore cannot exceed ION_NUM_HEAP_IDS.
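+ * (a heap with id N is selected by setting bit N in the heap_id_mask passed to ion_alloc())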
+ * @name: used for debug purposes + * @base: base address of heap in physical memory if applicable + * @size: size of the heap in bytes if applicable + * @align: required alignment in physical memory if applicable + * @priv: private info passed from the board file + * + * Provided by the board file. + */ +struct ion_platform_heap { + enum ion_heap_type type; + unsigned int id; + const char *name; + ion_phys_addr_t base; + size_t size; + ion_phys_addr_t align; + void *priv; +}; + +/** + * struct ion_platform_data - array of platform heaps passed from board file + * @nr: number of structures in the array + * @heaps: array of platform_heap structures + * + * Provided by the board file in the form of platform data to a platform device. + */ +struct ion_platform_data { + int nr; + struct ion_platform_heap *heaps; +}; + +/** + * ion_reserve() - reserve memory for ion heaps if applicable + * @data: platform data specifying starting physical address and + * size + * + * Uses memblock to set aside memory for heaps that are + * located at specific memory addresses or of specific sizes not + * managed by the kernel. + */ +void ion_reserve(struct ion_platform_data *data); + +/** + * ion_client_create() - allocates a client and returns it + * @dev: the global ion device + * @name: used for debugging + */ +struct ion_client *ion_client_create(struct ion_device *dev, + const char *name); + +/** + * ion_client_destroy() - frees a client and all its handles + * @client: the client + * + * Free the provided client and all its resources including + * any handles it is holding. + */ +void ion_client_destroy(struct ion_client *client); + +/** + * ion_alloc - allocate ion memory + * @client: the client + * @len: size of the allocation + * @align: requested allocation alignment, lots of hardware blocks + * have alignment requirements of some kind + * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set + * heaps will be tried in order from highest to lowest + * id + * @flags: heap flags, the low 16 bits are consumed by ion, the + * high 16 bits are passed on to the respective heap and + * can be heap-specific + * + * Allocate memory in one of the heaps provided in heap_id_mask and return + * an opaque handle to it. + */ +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int heap_id_mask, + unsigned int flags); + +/** + * ion_free - free a handle + * @client: the client + * @handle: the handle to free + * + * Free the provided handle. + */ +void ion_free(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_phys - returns the physical address and len of a handle + * @client: the client + * @handle: the handle + * @addr: a pointer to put the address in + * @len: a pointer to put the length in + * + * This function queries the heap for a particular handle to get the + * handle's physical address. Its output is only correct if + * a heap returns physically contiguous memory -- in other cases + * this API should not be implemented -- ion_sg_table should be used + * instead. Returns -EINVAL if the handle is invalid. This has + * no implications on the reference counting of the handle -- + * the returned value may not be valid if the caller is not + * holding a reference.
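+ *
+ * Illustrative use only (error handling elided; program_my_dma() stands
+ * in for a hypothetical consumer of the contiguous region):
+ *
+ *	ion_phys_addr_t addr;
+ *	size_t len;
+ *
+ *	if (!ion_phys(client, handle, &addr, &len))
+ *		program_my_dma(addr, len);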
+ */ +int ion_phys(struct ion_client *client, struct ion_handle *handle, + ion_phys_addr_t *addr, size_t *len); + +/** + * ion_sg_table - return an sg_table describing a handle + * @client: the client + * @handle: the handle + * + * This function returns the sg_table describing + * a particular ion handle. + */ +struct sg_table *ion_sg_table(struct ion_client *client, + struct ion_handle *handle); + +/** + * ion_map_kernel - create mapping for the given handle + * @client: the client + * @handle: handle to map + * + * Map the given handle into the kernel and return a kernel address that + * can be used to access the buffer's memory. + */ +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_unmap_kernel() - destroy a kernel mapping for a handle + * @client: the client + * @handle: handle to unmap + */ +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_share_dma_buf() - share buffer as dma-buf + * @client: the client + * @handle: the handle + */ +struct dma_buf *ion_share_dma_buf(struct ion_client *client, + struct ion_handle *handle); + +/** + * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd + * @client: the client + * @handle: the handle + */ +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle + * @client: the client + * @fd: the dma-buf fd + * + * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf, + * import that fd and return a handle representing it. If a dma-buf from + * another exporter is passed in, this function will return ERR_PTR(-EINVAL). + */ +struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd); + +#endif /* _LINUX_ION_H */ diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c new file mode 100644 index 000000000..9156d8238 --- /dev/null +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -0,0 +1,193 @@ +/* + * drivers/staging/android/ion/ion_carveout_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" + +struct ion_carveout_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; +}; + +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, + unsigned long size, + unsigned long align) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); + + if (!offset) + return ION_CARVEOUT_ALLOCATE_FAIL; + + return offset; +} + +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + if (addr == ION_CARVEOUT_ALLOCATE_FAIL) + return; + gen_pool_free(carveout_heap->pool, addr, size); +} + +static int ion_carveout_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + + *addr = paddr; + *len = buffer->size; + return 0; +} + +static int ion_carveout_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct sg_table *table; + ion_phys_addr_t paddr; + int ret; + + if (align > PAGE_SIZE) + return -EINVAL; + + table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) + goto err_free; + + paddr = ion_carveout_allocate(heap, size, align); + if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) { + ret = -ENOMEM; + goto err_free_table; + } + + sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); + buffer->priv_virt = table; + + return 0; + +err_free_table: + sg_free_table(table); +err_free: + kfree(table); + return ret; +} + +static void ion_carveout_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + + ion_heap_buffer_zero(buffer); + + if (ion_buffer_cached(buffer)) + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + + ion_carveout_free(heap, paddr, buffer->size); + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_carveout_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops carveout_heap_ops = { + .allocate = ion_carveout_heap_allocate, + .free = ion_carveout_heap_free, + .phys = ion_carveout_heap_phys, + .map_dma = ion_carveout_heap_map_dma, + .unmap_dma = ion_carveout_heap_unmap_dma, + .map_user = ion_heap_map_user, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_carveout_heap *carveout_heap; + int ret; + + struct page *page; + size_t size; + + page = pfn_to_page(PFN_DOWN(heap_data->base)); + size = heap_data->size; + + ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); + if (ret) + return ERR_PTR(ret); + + carveout_heap = 
kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); + if (!carveout_heap) + return ERR_PTR(-ENOMEM); + + carveout_heap->pool = gen_pool_create(12, -1); + if (!carveout_heap->pool) { + kfree(carveout_heap); + return ERR_PTR(-ENOMEM); + } + carveout_heap->base = heap_data->base; + gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, + -1); + carveout_heap->heap.ops = &carveout_heap_ops; + carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; + carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + + return &carveout_heap->heap; +} + +void ion_carveout_heap_destroy(struct ion_heap *heap) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + gen_pool_destroy(carveout_heap->pool); + kfree(carveout_heap); + carveout_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c new file mode 100644 index 000000000..3e6ec2ee6 --- /dev/null +++ b/drivers/staging/android/ion/ion_chunk_heap.c @@ -0,0 +1,194 @@ +/* + * drivers/staging/android/ion/ion_chunk_heap.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" + +struct ion_chunk_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; + unsigned long chunk_size; + unsigned long size; + unsigned long allocated; +}; + +static int ion_chunk_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + struct sg_table *table; + struct scatterlist *sg; + int ret, i; + unsigned long num_chunks; + unsigned long allocated_size; + + if (align > chunk_heap->chunk_size) + return -EINVAL; + + allocated_size = ALIGN(size, chunk_heap->chunk_size); + num_chunks = allocated_size / chunk_heap->chunk_size; + + if (allocated_size > chunk_heap->size - chunk_heap->allocated) + return -ENOMEM; + + table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + ret = sg_alloc_table(table, num_chunks, GFP_KERNEL); + if (ret) { + kfree(table); + return ret; + } + + sg = table->sgl; + for (i = 0; i < num_chunks; i++) { + unsigned long paddr = gen_pool_alloc(chunk_heap->pool, + chunk_heap->chunk_size); + if (!paddr) + goto err; + sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), + chunk_heap->chunk_size, 0); + sg = sg_next(sg); + } + + buffer->priv_virt = table; + chunk_heap->allocated += allocated_size; + return 0; +err: + sg = table->sgl; + for (i -= 1; i >= 0; i--) { + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), + sg->length); + sg = sg_next(sg); + } + sg_free_table(table); + kfree(table); + return -ENOMEM; +} + +static void ion_chunk_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + struct sg_table *table = buffer->priv_virt; + 
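/* at allocation time each sg entry was filled with exactly one chunk, so every entry can be handed back to the gen_pool on its own */ +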
struct scatterlist *sg; + int i; + unsigned long allocated_size; + + allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size); + + ion_heap_buffer_zero(buffer); + + if (ion_buffer_cached(buffer)) + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + + for_each_sg(table->sgl, sg, table->nents, i) { + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), + sg->length); + } + chunk_heap->allocated -= allocated_size; + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_chunk_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops chunk_heap_ops = { + .allocate = ion_chunk_heap_allocate, + .free = ion_chunk_heap_free, + .map_dma = ion_chunk_heap_map_dma, + .unmap_dma = ion_chunk_heap_unmap_dma, + .map_user = ion_heap_map_user, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_chunk_heap *chunk_heap; + int ret; + struct page *page; + size_t size; + + page = pfn_to_page(PFN_DOWN(heap_data->base)); + size = heap_data->size; + + ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); + if (ret) + return ERR_PTR(ret); + + chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL); + if (!chunk_heap) + return ERR_PTR(-ENOMEM); + + chunk_heap->chunk_size = (unsigned long)heap_data->priv; + chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) + + PAGE_SHIFT, -1); + if (!chunk_heap->pool) { + ret = -ENOMEM; + goto error_gen_pool_create; + } + chunk_heap->base = heap_data->base; + chunk_heap->size = heap_data->size; + chunk_heap->allocated = 0; + + gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); + chunk_heap->heap.ops = &chunk_heap_ops; + chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; + chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base, + heap_data->size, heap_data->align); + + return &chunk_heap->heap; + +error_gen_pool_create: + kfree(chunk_heap); + return ERR_PTR(ret); +} + +void ion_chunk_heap_destroy(struct ion_heap *heap) +{ + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + + gen_pool_destroy(chunk_heap->pool); + kfree(chunk_heap); + chunk_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c new file mode 100644 index 000000000..f4211f1be --- /dev/null +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -0,0 +1,195 @@ +/* + * drivers/staging/android/ion/ion_cma_heap.c + * + * Copyright (C) Linaro 2012 + * Author: for ST-Ericsson. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include + +#include "ion.h" +#include "ion_priv.h" + +#define ION_CMA_ALLOCATE_FAILED -1 + +struct ion_cma_heap { + struct ion_heap heap; + struct device *dev; +}; + +#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) + +struct ion_cma_buffer_info { + void *cpu_addr; + dma_addr_t handle; + struct sg_table *table; +}; + + +/* ION CMA heap operations functions */ +static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, + unsigned long len, unsigned long align, + unsigned long flags) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info; + + dev_dbg(dev, "Request buffer allocation len %ld\n", len); + + if (buffer->flags & ION_FLAG_CACHED) + return -EINVAL; + + if (align > PAGE_SIZE) + return -EINVAL; + + info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL); + if (!info) + return ION_CMA_ALLOCATE_FAILED; + + info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), + GFP_HIGHUSER | __GFP_ZERO); + + if (!info->cpu_addr) { + dev_err(dev, "Fail to allocate buffer\n"); + goto err; + } + + info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!info->table) + goto free_mem; + + if (dma_common_get_sgtable + (dev, info->table, info->cpu_addr, info->handle, len)) + goto free_table; + /* keep this for memory release */ + buffer->priv_virt = info; + dev_dbg(dev, "Allocate buffer %p\n", buffer); + return 0; + +free_table: + kfree(info->table); +free_mem: + dma_free_coherent(dev, len, info->cpu_addr, info->handle); +err: + kfree(info); + return ION_CMA_ALLOCATE_FAILED; +} + +static void ion_cma_free(struct ion_buffer *buffer) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + + dev_dbg(dev, "Release buffer %p\n", buffer); + /* release memory */ + dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); + /* release sg table */ + sg_free_table(info->table); + kfree(info->table); + kfree(info); +} + +/* return physical address in addr */ +static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + + dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer, + &info->handle); + + *addr = info->handle; + *len = buffer->size; + + return 0; +} + +static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_cma_buffer_info *info = buffer->priv_virt; + + return info->table; +} + +static void ion_cma_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + + return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle, + buffer->size); +} + +static void *ion_cma_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_cma_buffer_info *info = buffer->priv_virt; + /* kernel memory mapping has been done at allocation time */ + return info->cpu_addr; +} + +static void ion_cma_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} 
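+
+/*
+ * Illustrative only: a board file wires a CMA-backed device into ion by
+ * passing its struct device through ion_platform_heap.priv, e.g.:
+ *
+ *	static struct ion_platform_heap board_heaps[] = {
+ *		{
+ *			.id   = 2,		// arbitrary example id
+ *			.type = ION_HEAP_TYPE_DMA,
+ *			.name = "cma",
+ *			.priv = &board_cma_dev,	// hypothetical struct device
+ *		},
+ *	};
+ *
+ * ion_cma_heap_create() below then issues every dma_alloc_coherent() and
+ * dma_free_coherent() call against that device.
+ */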
+ +static struct ion_heap_ops ion_cma_ops = { + .allocate = ion_cma_allocate, + .free = ion_cma_free, + .map_dma = ion_cma_heap_map_dma, + .unmap_dma = ion_cma_heap_unmap_dma, + .phys = ion_cma_phys, + .map_user = ion_cma_mmap, + .map_kernel = ion_cma_map_kernel, + .unmap_kernel = ion_cma_unmap_kernel, +}; + +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data) +{ + struct ion_cma_heap *cma_heap; + + cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL); + + if (!cma_heap) + return ERR_PTR(-ENOMEM); + + cma_heap->heap.ops = &ion_cma_ops; + /* get device from private heaps data, later it will be + * used to make the link with reserved CMA memory */ + cma_heap->dev = data->priv; + cma_heap->heap.type = ION_HEAP_TYPE_DMA; + return &cma_heap->heap; +} + +void ion_cma_heap_destroy(struct ion_heap *heap) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(heap); + + kfree(cma_heap); +} diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c new file mode 100644 index 000000000..5678870bf --- /dev/null +++ b/drivers/staging/android/ion/ion_dummy_driver.c @@ -0,0 +1,154 @@ +/* + * drivers/gpu/ion/ion_dummy_driver.c + * + * Copyright (C) 2013 Linaro, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" + +static struct ion_device *idev; +static struct ion_heap **heaps; + +static void *carveout_ptr; +static void *chunk_ptr; + +static struct ion_platform_heap dummy_heaps[] = { + { + .id = ION_HEAP_TYPE_SYSTEM, + .type = ION_HEAP_TYPE_SYSTEM, + .name = "system", + }, + { + .id = ION_HEAP_TYPE_SYSTEM_CONTIG, + .type = ION_HEAP_TYPE_SYSTEM_CONTIG, + .name = "system contig", + }, + { + .id = ION_HEAP_TYPE_CARVEOUT, + .type = ION_HEAP_TYPE_CARVEOUT, + .name = "carveout", + .size = SZ_4M, + }, + { + .id = ION_HEAP_TYPE_CHUNK, + .type = ION_HEAP_TYPE_CHUNK, + .name = "chunk", + .size = SZ_4M, + .align = SZ_16K, + .priv = (void *)(SZ_16K), + }, +}; + +static struct ion_platform_data dummy_ion_pdata = { + .nr = ARRAY_SIZE(dummy_heaps), + .heaps = dummy_heaps, +}; + +static int __init ion_dummy_init(void) +{ + int i, err; + + idev = ion_device_create(NULL); + heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *), + GFP_KERNEL); + if (!heaps) + return -ENOMEM; + + + /* Allocate a dummy carveout heap */ + carveout_ptr = alloc_pages_exact( + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size, + GFP_KERNEL); + if (carveout_ptr) + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base = + virt_to_phys(carveout_ptr); + else + pr_err("ion_dummy: Could not allocate carveout\n"); + + /* Allocate a dummy chunk heap */ + chunk_ptr = alloc_pages_exact( + dummy_heaps[ION_HEAP_TYPE_CHUNK].size, + GFP_KERNEL); + if (chunk_ptr) + dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr); + else + pr_err("ion_dummy: Could not allocate chunk\n"); + + for (i = 0; i < dummy_ion_pdata.nr; i++) { + struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i]; + + if (heap_data->type == ION_HEAP_TYPE_CARVEOUT && + 
!heap_data->base) + continue; + + if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base) + continue; + + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(idev, heaps[i]); + } + return 0; +err: + for (i = 0; i < dummy_ion_pdata.nr; ++i) + ion_heap_destroy(heaps[i]); + kfree(heaps); + + if (carveout_ptr) { + free_pages_exact(carveout_ptr, + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); + carveout_ptr = NULL; + } + if (chunk_ptr) { + free_pages_exact(chunk_ptr, + dummy_heaps[ION_HEAP_TYPE_CHUNK].size); + chunk_ptr = NULL; + } + return err; +} +device_initcall(ion_dummy_init); + +static void __exit ion_dummy_exit(void) +{ + int i; + + ion_device_destroy(idev); + + for (i = 0; i < dummy_ion_pdata.nr; i++) + ion_heap_destroy(heaps[i]); + kfree(heaps); + + if (carveout_ptr) { + free_pages_exact(carveout_ptr, + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); + carveout_ptr = NULL; + } + if (chunk_ptr) { + free_pages_exact(chunk_ptr, + dummy_heaps[ION_HEAP_TYPE_CHUNK].size); + chunk_ptr = NULL; + } +} +__exitcall(ion_dummy_exit); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c new file mode 100644 index 000000000..fd13d05b5 --- /dev/null +++ b/drivers/staging/android/ion/ion_heap.c @@ -0,0 +1,381 @@ +/* + * drivers/staging/android/ion/ion_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" + +void *ion_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sg; + int i, j; + void *vaddr; + pgprot_t pgprot; + struct sg_table *table = buffer->sg_table; + int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + + if (!pages) + return NULL; + + if (buffer->flags & ION_FLAG_CACHED) + pgprot = PAGE_KERNEL; + else + pgprot = pgprot_writecombine(PAGE_KERNEL); + + for_each_sg(table->sgl, sg, table->nents, i) { + int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE; + struct page *page = sg_page(sg); + + BUG_ON(i >= npages); + for (j = 0; j < npages_this_entry; j++) + *(tmp++) = page++; + } + vaddr = vmap(pages, npages, VM_MAP, pgprot); + vfree(pages); + + if (vaddr == NULL) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +void ion_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + vunmap(buffer->vaddr); +} + +int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + struct sg_table *table = buffer->sg_table; + unsigned long addr = vma->vm_start; + unsigned long offset = vma->vm_pgoff * PAGE_SIZE; + struct scatterlist *sg; + int i; + int ret; + + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + unsigned long remainder = vma->vm_end - addr; + unsigned long len = sg->length; + + if (offset >= sg->length) { + offset -= sg->length; + continue; + } else if (offset) { + page += offset / PAGE_SIZE; + len = sg->length - offset; + offset = 0; + } + len = min(len, remainder); + ret = remap_pfn_range(vma, addr, page_to_pfn(page), len, + vma->vm_page_prot); + if (ret) + return ret; + addr += len; + if (addr >= vma->vm_end) + return 0; + } + return 0; +} + +static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) +{ + void *addr = vm_map_ram(pages, num, -1, pgprot); + + if (!addr) + return -ENOMEM; + memset(addr, 0, PAGE_SIZE * num); + vm_unmap_ram(addr, num); + + return 0; +} + +static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents, + pgprot_t pgprot) +{ + int p = 0; + int ret = 0; + struct sg_page_iter piter; + struct page *pages[32]; + + for_each_sg_page(sgl, &piter, nents, 0) { + pages[p++] = sg_page_iter_page(&piter); + if (p == ARRAY_SIZE(pages)) { + ret = ion_heap_clear_pages(pages, p, pgprot); + if (ret) + return ret; + p = 0; + } + } + if (p) + ret = ion_heap_clear_pages(pages, p, pgprot); + + return ret; +} + +int ion_heap_buffer_zero(struct ion_buffer *buffer) +{ + struct sg_table *table = buffer->sg_table; + pgprot_t pgprot; + + if (buffer->flags & ION_FLAG_CACHED) + pgprot = PAGE_KERNEL; + else + pgprot = pgprot_writecombine(PAGE_KERNEL); + + return ion_heap_sglist_zero(table->sgl, table->nents, pgprot); +} + +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) +{ + struct scatterlist sg; + + sg_init_table(&sg, 1); + sg_set_page(&sg, page, size, 0); + return ion_heap_sglist_zero(&sg, 1, pgprot); +} + +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) +{ + spin_lock(&heap->free_lock); + list_add(&buffer->list, &heap->free_list); + heap->free_list_size += buffer->size; + spin_unlock(&heap->free_lock); + wake_up(&heap->waitqueue); +} + +size_t ion_heap_freelist_size(struct ion_heap *heap) +{ + size_t size; + + 
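/* free_list_size is only ever changed with free_lock held, so take it here for a consistent read */ +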
spin_lock(&heap->free_lock); + size = heap->free_list_size; + spin_unlock(&heap->free_lock); + + return size; +} + +static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size, + bool skip_pools) +{ + struct ion_buffer *buffer; + size_t total_drained = 0; + + if (ion_heap_freelist_size(heap) == 0) + return 0; + + spin_lock(&heap->free_lock); + if (size == 0) + size = heap->free_list_size; + + while (!list_empty(&heap->free_list)) { + if (total_drained >= size) + break; + buffer = list_first_entry(&heap->free_list, struct ion_buffer, + list); + list_del(&buffer->list); + heap->free_list_size -= buffer->size; + if (skip_pools) + buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE; + total_drained += buffer->size; + spin_unlock(&heap->free_lock); + ion_buffer_destroy(buffer); + spin_lock(&heap->free_lock); + } + spin_unlock(&heap->free_lock); + + return total_drained; +} + +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size) +{ + return _ion_heap_freelist_drain(heap, size, false); +} + +size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size) +{ + return _ion_heap_freelist_drain(heap, size, true); +} + +static int ion_heap_deferred_free(void *data) +{ + struct ion_heap *heap = data; + + while (true) { + struct ion_buffer *buffer; + + wait_event_freezable(heap->waitqueue, + ion_heap_freelist_size(heap) > 0); + + spin_lock(&heap->free_lock); + if (list_empty(&heap->free_list)) { + spin_unlock(&heap->free_lock); + continue; + } + buffer = list_first_entry(&heap->free_list, struct ion_buffer, + list); + list_del(&buffer->list); + heap->free_list_size -= buffer->size; + spin_unlock(&heap->free_lock); + ion_buffer_destroy(buffer); + } + + return 0; +} + +int ion_heap_init_deferred_free(struct ion_heap *heap) +{ + struct sched_param param = { .sched_priority = 0 }; + + INIT_LIST_HEAD(&heap->free_list); + init_waitqueue_head(&heap->waitqueue); + heap->task = kthread_run(ion_heap_deferred_free, heap, + "%s", heap->name); + if (IS_ERR(heap->task)) { + pr_err("%s: creating thread for deferred free failed\n", + __func__); + return PTR_ERR_OR_ZERO(heap->task); + } + sched_setscheduler(heap->task, SCHED_IDLE, ¶m); + return 0; +} + +static unsigned long ion_heap_shrink_count(struct shrinker *shrinker, + struct shrink_control *sc) +{ + struct ion_heap *heap = container_of(shrinker, struct ion_heap, + shrinker); + int total = 0; + + total = ion_heap_freelist_size(heap) / PAGE_SIZE; + if (heap->ops->shrink) + total += heap->ops->shrink(heap, sc->gfp_mask, 0); + return total; +} + +static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker, + struct shrink_control *sc) +{ + struct ion_heap *heap = container_of(shrinker, struct ion_heap, + shrinker); + int freed = 0; + int to_scan = sc->nr_to_scan; + + if (to_scan == 0) + return 0; + + /* + * shrink the free list first, no point in zeroing the memory if we're + * just going to reclaim it. Also, skip any possible page pooling. 
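+ * (ion_heap_freelist_shrink() sets ION_PRIV_FLAG_SHRINKER_FREE on each buffer it drains, which tells the heap's free callback to release pages directly instead of recycling them into a pool.)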
+ */ + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) / + PAGE_SIZE; + + to_scan -= freed; + if (to_scan <= 0) + return freed; + + if (heap->ops->shrink) + freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan); + return freed; +} + +void ion_heap_init_shrinker(struct ion_heap *heap) +{ + heap->shrinker.count_objects = ion_heap_shrink_count; + heap->shrinker.scan_objects = ion_heap_shrink_scan; + heap->shrinker.seeks = DEFAULT_SEEKS; + heap->shrinker.batch = 0; + register_shrinker(&heap->shrinker); +} + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_heap *heap = NULL; + + switch (heap_data->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + heap = ion_system_contig_heap_create(heap_data); + break; + case ION_HEAP_TYPE_SYSTEM: + heap = ion_system_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CARVEOUT: + heap = ion_carveout_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CHUNK: + heap = ion_chunk_heap_create(heap_data); + break; + case ION_HEAP_TYPE_DMA: + heap = ion_cma_heap_create(heap_data); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap_data->type); + return ERR_PTR(-EINVAL); + } + + if (IS_ERR_OR_NULL(heap)) { + pr_err("%s: error creating heap %s type %d base %lu size %zu\n", + __func__, heap_data->name, heap_data->type, + heap_data->base, heap_data->size); + return ERR_PTR(-EINVAL); + } + + heap->name = heap_data->name; + heap->id = heap_data->id; + return heap; +} + +void ion_heap_destroy(struct ion_heap *heap) +{ + if (!heap) + return; + + switch (heap->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + ion_system_contig_heap_destroy(heap); + break; + case ION_HEAP_TYPE_SYSTEM: + ion_system_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CARVEOUT: + ion_carveout_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CHUNK: + ion_chunk_heap_destroy(heap); + break; + case ION_HEAP_TYPE_DMA: + ion_cma_heap_destroy(heap); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap->type); + } +} diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c new file mode 100644 index 000000000..4b88f11e5 --- /dev/null +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -0,0 +1,182 @@ +/* + * drivers/staging/android/ion/ion_mem_pool.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/debugfs.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/swap.h> +#include "ion_priv.h" + +static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) +{ + struct page *page = alloc_pages(pool->gfp_mask, pool->order); + + if (!page) + return NULL; + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, + DMA_BIDIRECTIONAL); + return page; +} + +static void ion_page_pool_free_pages(struct ion_page_pool *pool, + struct page *page) +{ + __free_pages(page, pool->order); +} + +static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) +{ + mutex_lock(&pool->mutex); + if (PageHighMem(page)) { + list_add_tail(&page->lru, &pool->high_items); + pool->high_count++; + } else { + list_add_tail(&page->lru, &pool->low_items); + pool->low_count++; + } + mutex_unlock(&pool->mutex); + return 0; +} + +static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) +{ + struct page *page; + + if (high) { + BUG_ON(!pool->high_count); + page = list_first_entry(&pool->high_items, struct page, lru); + pool->high_count--; + } else { + BUG_ON(!pool->low_count); + page = list_first_entry(&pool->low_items, struct page, lru); + pool->low_count--; + } + + list_del(&page->lru); + return page; +} + +struct page *ion_page_pool_alloc(struct ion_page_pool *pool) +{ + struct page *page = NULL; + + BUG_ON(!pool); + + mutex_lock(&pool->mutex); + if (pool->high_count) + page = ion_page_pool_remove(pool, true); + else if (pool->low_count) + page = ion_page_pool_remove(pool, false); + mutex_unlock(&pool->mutex); + + if (!page) + page = ion_page_pool_alloc_pages(pool); + + return page; +} + +void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) +{ + int ret; + + BUG_ON(pool->order != compound_order(page)); + + ret = ion_page_pool_add(pool, page); + if (ret) + ion_page_pool_free_pages(pool, page); +} + +static int ion_page_pool_total(struct ion_page_pool *pool, bool high) +{ + int count = pool->low_count; + + if (high) + count += pool->high_count; + + return count << pool->order; +} + +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, + int nr_to_scan) +{ + int freed; + bool high; + + if (current_is_kswapd()) + high = true; + else + high = !!(gfp_mask & __GFP_HIGHMEM); + + if (nr_to_scan == 0) + return ion_page_pool_total(pool, high); + + for (freed = 0; freed < nr_to_scan; freed++) { + struct page *page; + + mutex_lock(&pool->mutex); + if (pool->low_count) { + page = ion_page_pool_remove(pool, false); + } else if (high && pool->high_count) { + page = ion_page_pool_remove(pool, true); + } else { + mutex_unlock(&pool->mutex); + break; + } + mutex_unlock(&pool->mutex); + ion_page_pool_free_pages(pool, page); + } + + return freed; +} + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) +{ + struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool), + GFP_KERNEL); + if (!pool) + return NULL; + pool->high_count = 0; + pool->low_count = 0; + INIT_LIST_HEAD(&pool->low_items); + INIT_LIST_HEAD(&pool->high_items); + pool->gfp_mask = gfp_mask | __GFP_COMP; + pool->order = order; + mutex_init(&pool->mutex); + plist_node_init(&pool->list, order); + + return pool; +} + +void ion_page_pool_destroy(struct ion_page_pool *pool) +{ + kfree(pool); +} + +static int __init ion_page_pool_init(void) +{ + return 0; +} + +static void __exit ion_page_pool_exit(void) +{ +} + +module_init(ion_page_pool_init); +module_exit(ion_page_pool_exit); diff --git a/drivers/staging/android/ion/ion_priv.h
b/drivers/staging/android/ion/ion_priv.h new file mode 100644 index 000000000..18a5f93e1 --- /dev/null +++ b/drivers/staging/android/ion/ion_priv.h @@ -0,0 +1,405 @@ +/* + * drivers/staging/android/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _ION_PRIV_H +#define _ION_PRIV_H + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/kref.h> +#include <linux/mm_types.h> +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/sched.h> +#include <linux/shrinker.h> +#include <linux/types.h> + +#include "ion.h" + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); + +/** + * struct ion_buffer - metadata for a particular buffer + * @ref: reference count + * @node: node in the ion_device buffers tree + * @dev: back pointer to the ion_device + * @heap: back pointer to the heap the buffer came from + * @flags: buffer specific flags + * @private_flags: internal buffer specific flags + * @size: size of the buffer + * @priv_virt: private data to the buffer representable as + * a void * + * @priv_phys: private data to the buffer representable as + * an ion_phys_addr_t (and someday a phys_addr_t) + * @lock: protects the buffer's cnt fields + * @kmap_cnt: number of times the buffer is mapped to the kernel + * @vaddr: the kernel mapping if kmap_cnt is not zero + * @dmap_cnt: number of times the buffer is mapped for dma + * @sg_table: the sg table for the buffer if dmap_cnt is not zero + * @pages: flat array of pages in the buffer -- used by fault + * handler and only valid for buffers that are faulted in + * @vmas: list of vmas mapping this buffer + * @handle_count: count of handles referencing this buffer + * @task_comm: taskcomm of last client to reference this buffer in a + * handle, used for debugging + * @pid: pid of last client to reference this buffer in a + * handle, used for debugging +*/ +struct ion_buffer { + struct kref ref; + union { + struct rb_node node; + struct list_head list; + }; + struct ion_device *dev; + struct ion_heap *heap; + unsigned long flags; + unsigned long private_flags; + size_t size; + union { + void *priv_virt; + ion_phys_addr_t priv_phys; + }; + struct mutex lock; + int kmap_cnt; + void *vaddr; + int dmap_cnt; + struct sg_table *sg_table; + struct page **pages; + struct list_head vmas; + /* used to track orphaned buffers */ + int handle_count; + char task_comm[TASK_COMM_LEN]; + pid_t pid; +}; +void ion_buffer_destroy(struct ion_buffer *buffer); + +/** + * struct ion_heap_ops - ops to operate on a given heap + * @allocate: allocate memory + * @free: free memory + * @phys get physical address of a buffer (only defined on + * physically contiguous heaps) + * @map_dma map the memory for dma to a scatterlist + * @unmap_dma unmap the memory for dma + * @map_kernel map memory to the kernel + * @unmap_kernel unmap memory to the kernel + * @map_user map memory to userspace + * + * allocate, phys, and map_user return 0 on success, -errno on error. + * map_dma and map_kernel return a pointer on success, ERR_PTR on + * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in + * the buffer's private_flags when called from a shrinker.
In that + * case, the pages being freed must be truly freed back to the + * system, not put in a page pool or otherwise cached. + */ +struct ion_heap_ops { + int (*allocate)(struct ion_heap *heap, + struct ion_buffer *buffer, unsigned long len, + unsigned long align, unsigned long flags); + void (*free)(struct ion_buffer *buffer); + int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len); + struct sg_table * (*map_dma)(struct ion_heap *heap, + struct ion_buffer *buffer); + void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer); + void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); + void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); + int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer, + struct vm_area_struct *vma); + int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); +}; + +/** + * heap flags - flags between the heaps and core ion code + */ +#define ION_HEAP_FLAG_DEFER_FREE (1 << 0) + +/** + * private flags - flags internal to ion + */ +/* + * Buffer is being freed from a shrinker function. Skip any possible + * heap-specific caching mechanism (e.g. page pools). Guarantees that + * any buffer storage that came from the system allocator will be + * returned to the system allocator. + */ +#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0) + +/** + * struct ion_heap - represents a heap in the system + * @node: plist node used to keep the heap on the device's list of heaps + * @dev: back pointer to the ion_device + * @type: type of heap + * @ops: ops struct as above + * @flags: flags + * @id: id of heap, also indicates priority of this heap when + * allocating. These are specified by platform data and + * MUST be unique + * @name: used for debugging + * @shrinker: a shrinker for the heap + * @free_list: free list head if deferred free is used + * @free_list_size: size of the deferred free list in bytes + * @free_lock: protects the free list + * @waitqueue: queue to wait on from deferred free thread + * @task: task struct of deferred free thread + * @debug_show: called when heap debug file is read to add any + * heap specific debug info to output + * + * Represents a pool of memory from which buffers can be made. In some + * systems the only heap is regular system memory allocated via vmalloc. + * On others, some blocks might require large physically contiguous buffers + * that are allocated from a specially reserved heap. + */ +struct ion_heap { + struct plist_node node; + struct ion_device *dev; + enum ion_heap_type type; + struct ion_heap_ops *ops; + unsigned long flags; + unsigned int id; + const char *name; + struct shrinker shrinker; + struct list_head free_list; + size_t free_list_size; + spinlock_t free_lock; + wait_queue_head_t waitqueue; + struct task_struct *task; + + int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); +}; + +/** + * ion_buffer_cached - this ion buffer is cached + * @buffer: buffer + * + * indicates whether this ion buffer is cached + */ +bool ion_buffer_cached(struct ion_buffer *buffer); + +/** + * ion_buffer_fault_user_mappings - fault in user mappings of this buffer + * @buffer: buffer + * + * indicates whether userspace mappings of this buffer will be faulted + * in; this can affect how buffers are allocated from the heap.
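+ *
+ * (Elsewhere in this patch, ion.c implements this check as: the buffer was
+ * allocated with ION_FLAG_CACHED set and ION_FLAG_CACHED_NEEDS_SYNC clear;
+ * only such buffers get the flat @pages array used by the fault handler.)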
+ */ +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer); + +/** + * ion_device_create - allocates and returns an ion device + * @custom_ioctl: arch specific ioctl function if applicable + * + * returns a valid device or an ERR_PTR on failure + */ +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)); + +/** + * ion_device_destroy - frees a device and its resources + * @dev: the device + */ +void ion_device_destroy(struct ion_device *dev); + +/** + * ion_device_add_heap - adds a heap to the ion device + * @dev: the device + * @heap: the heap to add + */ +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); + +/** + * some helpers for common operations on buffers using the sg_table + * and vaddr fields + */ +void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *); +void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *); +int ion_heap_map_user(struct ion_heap *, struct ion_buffer *, + struct vm_area_struct *); +int ion_heap_buffer_zero(struct ion_buffer *buffer); +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); + +/** + * ion_heap_init_shrinker + * @heap: the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op, + * this function will be called to set up a shrinker to shrink the freelists + * and call the heap's shrink op. + */ +void ion_heap_init_shrinker(struct ion_heap *heap); + +/** + * ion_heap_init_deferred_free -- initialize deferred free functionality + * @heap: the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will + * be called to set up deferred freeing. Calls to free the buffer will + * return immediately and the actual free will occur some time later. + */ +int ion_heap_init_deferred_free(struct ion_heap *heap); + +/** + * ion_heap_freelist_add - add a buffer to the deferred free list + * @heap: the heap + * @buffer: the buffer + * + * Adds an item to the deferred freelist. + */ +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); + +/** + * ion_heap_freelist_drain - drain the deferred free list + * @heap: the heap + * @size: amount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed. The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist. + */ +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); + +/** + * ion_heap_freelist_shrink - drain the deferred free + * list, skipping any heap-specific + * pooling or caching mechanisms + * + * @heap: the heap + * @size: amount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed. The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist. + * + * Unlike with @ion_heap_freelist_drain, don't put any pages back into + * page pools or otherwise cache the pages. Everything must be + * genuinely freed back to the system. If you're freeing from a + * shrinker, you probably want to use this. Note that this relies on + * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE + * flag.
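+ *
+ * A heap's free callback can honor the flag along these lines (an
+ * illustrative sketch; the my_heap_* helpers are hypothetical):
+ *
+ *	static void my_heap_free(struct ion_buffer *buffer)
+ *	{
+ *		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
+ *			my_heap_release_to_system(buffer);
+ *		else
+ *			my_heap_recycle_to_pool(buffer);
+ *	}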
+ */ +size_t ion_heap_freelist_shrink(struct ion_heap *heap, + size_t size); + +/** + * ion_heap_freelist_size - returns the size of the freelist in bytes + * @heap: the heap + */ +size_t ion_heap_freelist_size(struct ion_heap *heap); + + +/** + * functions for creating and destroying the built-in ion heaps. + * architectures can add their own custom architecture specific + * heaps as appropriate. + */ + +struct ion_heap *ion_heap_create(struct ion_platform_heap *); +void ion_heap_destroy(struct ion_heap *); +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); +void ion_system_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); +void ion_system_contig_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); +void ion_carveout_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *); +void ion_chunk_heap_destroy(struct ion_heap *); +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *); +void ion_cma_heap_destroy(struct ion_heap *); + +/** + * kernel api to allocate/free from carveout -- used when carveout is + * used to back an architecture specific custom heap + */ +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, + unsigned long align); +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size); +/** + * The carveout heap returns physical addresses; since 0 may be a valid + * physical address, this is used to indicate that allocation failed + */ +#define ION_CARVEOUT_ALLOCATE_FAIL -1 + +/** + * functions for creating and destroying a heap pool -- allows you + * to keep a pool of preallocated memory to use from your heap. Keeping + * a pool of memory that is ready for dma, i.e. any cached mappings have been + * invalidated from the cache, provides a significant performance benefit on + * many systems */ + +/** + * struct ion_page_pool - page pool struct + * @high_count: number of highmem items in the pool + * @low_count: number of lowmem items in the pool + * @high_items: list of highmem items + * @low_items: list of lowmem items + * @mutex: lock protecting this struct, especially the counts + * and item lists + * @gfp_mask: gfp_mask to use for allocations + * @order: order of pages in the pool + * @list: plist node for list of pools + * + * Allows you to keep a pool of preallocated pages to use from your heap. + * Keeping a pool of pages that is ready for dma, i.e. any cached mappings + * have been invalidated from the cache, provides a significant performance + * benefit on many systems. + */ +struct ion_page_pool { + int high_count; + int low_count; + struct list_head high_items; + struct list_head low_items; + struct mutex mutex; + gfp_t gfp_mask; + unsigned int order; + struct plist_node list; +}; + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order); +void ion_page_pool_destroy(struct ion_page_pool *); +struct page *ion_page_pool_alloc(struct ion_page_pool *); +void ion_page_pool_free(struct ion_page_pool *, struct page *); + +/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool + * @pool: the pool + * @gfp_mask: the memory type to reclaim + * @nr_to_scan: number of pool items to try to free + * + * returns the number of items freed, or the pool's total footprint in + * pages when @nr_to_scan is 0 + */ +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, + int nr_to_scan); + +/** + * ion_pages_sync_for_device - cache flush pages for use with the specified + * device + * @dev: the device the pages will be used with + * @page: the first page to be flushed + * @size: size in bytes of region to be flushed + * @dir: direction of dma transfer + */ +void ion_pages_sync_for_device(struct device *dev, struct page *page, + size_t size, enum dma_data_direction dir); + +#endif /* _ION_PRIV_H */ diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c new file mode 100644 index 000000000..da2a63c0a --- /dev/null +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -0,0 +1,422 @@ +/* + * drivers/staging/android/ion/ion_system_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
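The page pool API declared above is implemented by ion_page_pool.c earlier in this patch. A minimal usage sketch, assuming a heap that keeps a single pool of order-4 chunks as private state (the my_heap_* names are hypothetical and error handling is omitted):

	static struct ion_page_pool *my_heap_pool;

	static int my_heap_setup(void)
	{
		/* one pool of order-4 (16-page) chunks; the pool ORs in __GFP_COMP itself */
		my_heap_pool = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, 4);
		return my_heap_pool ? 0 : -ENOMEM;
	}

	static void my_heap_cycle_one_chunk(void)
	{
		/* reuses a cached chunk when one is available, otherwise allocates */
		struct page *page = ion_page_pool_alloc(my_heap_pool);

		/* ... hand the pages to a buffer and later get them back ... */

		/* returns the chunk to the pool rather than to the system */
		ion_page_pool_free(my_heap_pool, page);
	}

	static void my_heap_teardown(void)
	{
		/* a shrink op would call ion_page_pool_shrink() under memory pressure */
		ion_page_pool_destroy(my_heap_pool);
	}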
+ * + */ + +#include <asm/page.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | + __GFP_NORETRY) & ~__GFP_WAIT; +static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN); +static const unsigned int orders[] = {8, 4, 0}; +static const int num_orders = ARRAY_SIZE(orders); +static int order_to_index(unsigned int order) +{ + int i; + + for (i = 0; i < num_orders; i++) + if (order == orders[i]) + return i; + BUG(); + return -1; +} + +static inline unsigned int order_to_size(int order) +{ + return PAGE_SIZE << order; +} + +struct ion_system_heap { + struct ion_heap heap; + struct ion_page_pool *pools[0]; +}; + +static struct page *alloc_buffer_page(struct ion_system_heap *heap, + struct ion_buffer *buffer, + unsigned long order) +{ + bool cached = ion_buffer_cached(buffer); + struct ion_page_pool *pool = heap->pools[order_to_index(order)]; + struct page *page; + + if (!cached) { + page = ion_page_pool_alloc(pool); + } else { + gfp_t gfp_flags = low_order_gfp_flags; + + if (order > 4) + gfp_flags = high_order_gfp_flags; + page = alloc_pages(gfp_flags | __GFP_COMP, order); + if (!page) + return NULL; + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, + DMA_BIDIRECTIONAL); + } + + return page; +} + +static void free_buffer_page(struct ion_system_heap *heap, + struct ion_buffer *buffer, struct page *page) +{ + unsigned int order = compound_order(page); + bool cached = ion_buffer_cached(buffer); + + if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) { + struct ion_page_pool *pool = heap->pools[order_to_index(order)]; + + ion_page_pool_free(pool, page); + } else { + __free_pages(page, order); + } +} + + +static struct page *alloc_largest_available(struct ion_system_heap *heap, + struct ion_buffer *buffer, + unsigned long size, + unsigned int max_order) +{ + struct page *page; + int i; + + for (i = 0; i < num_orders; i++) { + if (size < order_to_size(orders[i])) + continue; + if (max_order < orders[i]) + continue; + + page = alloc_buffer_page(heap, buffer, orders[i]); + if (!page) + continue; + + return page; + } + + return NULL; +} + +static int ion_system_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + struct sg_table *table; + struct scatterlist *sg; + struct list_head pages; + struct page *page, *tmp_page; + int i = 0; + unsigned long size_remaining = PAGE_ALIGN(size); + unsigned int max_order = orders[0]; + + if (align > PAGE_SIZE) + return -EINVAL; + + if (size / PAGE_SIZE > totalram_pages / 2) + return -ENOMEM; + + INIT_LIST_HEAD(&pages); + while (size_remaining > 0) { + page = alloc_largest_available(sys_heap, buffer, size_remaining, + max_order); + if (!page) + goto free_pages; + list_add_tail(&page->lru, &pages); + size_remaining -= PAGE_SIZE << compound_order(page); + max_order = compound_order(page); + i++; + } + table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + goto free_pages; + + if (sg_alloc_table(table, i, GFP_KERNEL)) + goto free_table; + + sg = table->sgl; + list_for_each_entry_safe(page, tmp_page, &pages, lru) { + sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0); + sg = sg_next(sg); + list_del(&page->lru); + } + + buffer->priv_virt = table; + return 0; + +free_table: + kfree(table);
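+	/* fall through: also release any pages gathered so far */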
+free_pages: + list_for_each_entry_safe(page, tmp_page, &pages, lru) + free_buffer_page(sys_heap, buffer, page); + return -ENOMEM; +} + +static void ion_system_heap_free(struct ion_buffer *buffer) +{ + struct ion_system_heap *sys_heap = container_of(buffer->heap, + struct ion_system_heap, + heap); + struct sg_table *table = buffer->sg_table; + bool cached = ion_buffer_cached(buffer); + struct scatterlist *sg; + int i; + + /* uncached pages come from the page pools, zero them before returning + for security purposes (other allocations are zeroed at alloc time) */ + if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) + ion_heap_buffer_zero(buffer); + + for_each_sg(table->sgl, sg, table->nents, i) + free_buffer_page(sys_heap, buffer, sg_page(sg)); + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_system_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, + int nr_to_scan) +{ + struct ion_system_heap *sys_heap; + int nr_total = 0; + int i; + + sys_heap = container_of(heap, struct ion_system_heap, heap); + + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool = sys_heap->pools[i]; + + nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); + } + + return nr_total; +} + +static struct ion_heap_ops system_heap_ops = { + .allocate = ion_system_heap_allocate, + .free = ion_system_heap_free, + .map_dma = ion_system_heap_map_dma, + .unmap_dma = ion_system_heap_unmap_dma, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, + .map_user = ion_heap_map_user, + .shrink = ion_system_heap_shrink, +}; + +static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, + void *unused) +{ + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + int i; + + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool = sys_heap->pools[i]; + + seq_printf(s, "%d order %u highmem pages in pool = %lu total\n", + pool->high_count, pool->order, + (PAGE_SIZE << pool->order) * pool->high_count); + seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n", + pool->low_count, pool->order, + (PAGE_SIZE << pool->order) * pool->low_count); + } + return 0; +} + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) +{ + struct ion_system_heap *heap; + int i; + + heap = kzalloc(sizeof(struct ion_system_heap) + + sizeof(struct ion_page_pool *) * num_orders, + GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->heap.ops = &system_heap_ops; + heap->heap.type = ION_HEAP_TYPE_SYSTEM; + heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool; + gfp_t gfp_flags = low_order_gfp_flags; + + if (orders[i] > 4) + gfp_flags = high_order_gfp_flags; + pool = ion_page_pool_create(gfp_flags, orders[i]); + if (!pool) + goto destroy_pools; + heap->pools[i] = pool; + } + + heap->heap.debug_show = ion_system_heap_debug_show; + return &heap->heap; + +destroy_pools: + while (i--) + ion_page_pool_destroy(heap->pools[i]); + kfree(heap); + return ERR_PTR(-ENOMEM); +} + +void ion_system_heap_destroy(struct ion_heap *heap) +{ + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + int i; + + for (i = 0; i < num_orders; i++) + ion_page_pool_destroy(sys_heap->pools[i]); +
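+	/* pools[] is allocated inline with the heap struct, so the single kfree below releases both */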
kfree(sys_heap); +} + +static int ion_system_contig_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + int order = get_order(len); + struct page *page; + struct sg_table *table; + unsigned long i; + int ret; + + if (align > (PAGE_SIZE << order)) + return -EINVAL; + + page = alloc_pages(low_order_gfp_flags, order); + if (!page) + return -ENOMEM; + + split_page(page, order); + + len = PAGE_ALIGN(len); + for (i = len >> PAGE_SHIFT; i < (1 << order); i++) + __free_page(page + i); + + table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) { + ret = -ENOMEM; + goto free_pages; + } + + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) + goto free_table; + + sg_set_page(table->sgl, page, len, 0); + + buffer->priv_virt = table; + + ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); + + return 0; + +free_table: + kfree(table); +free_pages: + for (i = 0; i < len >> PAGE_SHIFT; i++) + __free_page(page + i); + + return ret; +} + +static void ion_system_contig_heap_free(struct ion_buffer *buffer) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; + unsigned long i; + + for (i = 0; i < pages; i++) + __free_page(page + i); + sg_free_table(table); + kfree(table); +} + +static int ion_system_contig_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + *addr = page_to_phys(page); + *len = buffer->size; + return 0; +} + +static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops kmalloc_ops = { + .allocate = ion_system_contig_heap_allocate, + .free = ion_system_contig_heap_free, + .phys = ion_system_contig_heap_phys, + .map_dma = ion_system_contig_heap_map_dma, + .unmap_dma = ion_system_contig_heap_unmap_dma, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, + .map_user = ion_heap_map_user, +}; + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &kmalloc_ops; + heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; + return heap; +} + +void ion_system_contig_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c new file mode 100644 index 000000000..3bc461cbb --- /dev/null +++ b/drivers/staging/android/ion/ion_test.c @@ -0,0 +1,283 @@ +/* + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
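To make the system heap's order-walking concrete: with 4 KiB pages, orders {8, 4, 0} correspond to 1 MiB, 64 KiB and 4 KiB chunks, and a hypothetical 1.5 MiB allocation proceeds as:

	size_remaining = 384 pages (1.5 MiB), max_order = 8
	order 8: 256 pages fit -> 1 chunk, 128 pages (512 KiB) left, max_order = 8
	order 8: too big for the 512 KiB remainder, skipped from here on
	order 4: 16 pages fit  -> 8 chunks, 0 pages left

	result: i = 9 chunks, so sg_alloc_table() gets nents = 9

Falling back to ever smaller orders bounds fragmentation while still preferring large physically contiguous chunks when they are cheap to obtain.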
+ * + */ + +#define pr_fmt(fmt) "ion-test: " fmt + +#include <linux/dma-buf.h> +#include <linux/dma-direction.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> + +#include "ion.h" +#include "../uapi/ion_test.h" + +#define u64_to_uptr(x) ((void __user *)(unsigned long)(x)) + +struct ion_test_device { + struct miscdevice misc; +}; + +struct ion_test_data { + struct dma_buf *dma_buf; + struct device *dev; +}; + +static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf, + void __user *ptr, size_t offset, size_t size, bool write) +{ + int ret = 0; + struct dma_buf_attachment *attach; + struct sg_table *table; + pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); + enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + struct sg_page_iter sg_iter; + unsigned long offset_page; + + attach = dma_buf_attach(dma_buf, dev); + if (IS_ERR(attach)) + return PTR_ERR(attach); + + table = dma_buf_map_attachment(attach, dir); + if (IS_ERR(table)) + return PTR_ERR(table); + + offset_page = offset >> PAGE_SHIFT; + offset %= PAGE_SIZE; + + for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) { + struct page *page = sg_page_iter_page(&sg_iter); + void *vaddr = vmap(&page, 1, VM_MAP, pgprot); + size_t to_copy = PAGE_SIZE - offset; + + to_copy = min(to_copy, size); + if (!vaddr) { + ret = -ENOMEM; + goto err; + } + + if (write) + ret = copy_from_user(vaddr + offset, ptr, to_copy); + else + ret = copy_to_user(ptr, vaddr + offset, to_copy); + + vunmap(vaddr); + if (ret) { + ret = -EFAULT; + goto err; + } + size -= to_copy; + if (!size) + break; + ptr += to_copy; + offset = 0; + } + +err: + dma_buf_unmap_attachment(attach, table, dir); + dma_buf_detach(dma_buf, attach); + return ret; +} + +static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, + size_t offset, size_t size, bool write) +{ + int ret; + unsigned long page_offset = offset >> PAGE_SHIFT; + size_t copy_offset = offset % PAGE_SIZE; + size_t copy_size = size; + enum dma_data_direction dir = write ?
DMA_FROM_DEVICE : DMA_TO_DEVICE; + + if (offset > dma_buf->size || size > dma_buf->size - offset) + return -EINVAL; + + ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir); + if (ret) + return ret; + + while (copy_size > 0) { + size_t to_copy; + void *vaddr = dma_buf_kmap(dma_buf, page_offset); + + if (!vaddr) + goto err; + + to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size); + + if (write) + ret = copy_from_user(vaddr + copy_offset, ptr, to_copy); + else + ret = copy_to_user(ptr, vaddr + copy_offset, to_copy); + + dma_buf_kunmap(dma_buf, page_offset, vaddr); + if (ret) { + ret = -EFAULT; + goto err; + } + + copy_size -= to_copy; + ptr += to_copy; + page_offset++; + copy_offset = 0; + } +err: + dma_buf_end_cpu_access(dma_buf, offset, size, dir); + return ret; +} + +static long ion_test_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct ion_test_data *test_data = filp->private_data; + int ret = 0; + + union { + struct ion_test_rw_data test_rw; + } data; + + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + if (_IOC_DIR(cmd) & _IOC_WRITE) + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + switch (cmd) { + case ION_IOC_TEST_SET_FD: + { + struct dma_buf *dma_buf = NULL; + int fd = arg; + + if (fd >= 0) { + dma_buf = dma_buf_get((int)arg); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + } + if (test_data->dma_buf) + dma_buf_put(test_data->dma_buf); + test_data->dma_buf = dma_buf; + break; + } + case ION_IOC_TEST_DMA_MAPPING: + { + ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf, + u64_to_uptr(data.test_rw.ptr), + data.test_rw.offset, data.test_rw.size, + data.test_rw.write); + break; + } + case ION_IOC_TEST_KERNEL_MAPPING: + { + ret = ion_handle_test_kernel(test_data->dma_buf, + u64_to_uptr(data.test_rw.ptr), + data.test_rw.offset, data.test_rw.size, + data.test_rw.write); + break; + } + default: + return -ENOTTY; + } + + if (_IOC_DIR(cmd) & _IOC_READ) { + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + } + return ret; +} + +static int ion_test_open(struct inode *inode, struct file *file) +{ + struct ion_test_data *data; + struct miscdevice *miscdev = file->private_data; + + data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = miscdev->parent; + + file->private_data = data; + + return 0; +} + +static int ion_test_release(struct inode *inode, struct file *file) +{ + struct ion_test_data *data = file->private_data; + + kfree(data); + + return 0; +} + +static const struct file_operations ion_test_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = ion_test_ioctl, + .compat_ioctl = ion_test_ioctl, + .open = ion_test_open, + .release = ion_test_release, +}; + +static int __init ion_test_probe(struct platform_device *pdev) +{ + int ret; + struct ion_test_device *testdev; + + testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device), + GFP_KERNEL); + if (!testdev) + return -ENOMEM; + + testdev->misc.minor = MISC_DYNAMIC_MINOR; + testdev->misc.name = "ion-test"; + testdev->misc.fops = &ion_test_fops; + testdev->misc.parent = &pdev->dev; + ret = misc_register(&testdev->misc); + if (ret) { + pr_err("failed to register misc device.\n"); + return ret; + } + + platform_set_drvdata(pdev, testdev); + + return 0; +} + +static struct platform_driver ion_test_platform_driver = { + .driver = { + .name = "ion-test", + }, +}; + +static int __init ion_test_init(void) +{ + platform_device_register_simple("ion-test", -1, NULL, 
0); + return platform_driver_probe(&ion_test_platform_driver, ion_test_probe); +} + +static void __exit ion_test_exit(void) +{ + platform_driver_unregister(&ion_test_platform_driver); +} + +module_init(ion_test_init); +module_exit(ion_test_exit); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile new file mode 100644 index 000000000..11cd003fb --- /dev/null +++ b/drivers/staging/android/ion/tegra/Makefile @@ -0,0 +1 @@ +obj-y += tegra_ion.o diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c new file mode 100644 index 000000000..5b8ef0e66 --- /dev/null +++ b/drivers/staging/android/ion/tegra/tegra_ion.c @@ -0,0 +1,80 @@ +/* + * drivers/gpu/tegra/tegra_ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include "../ion.h" +#include "../ion_priv.h" + +static struct ion_device *idev; +static int num_heaps; +static struct ion_heap **heaps; + +static int tegra_ion_probe(struct platform_device *pdev) +{ + struct ion_platform_data *pdata = pdev->dev.platform_data; + int err; + int i; + + num_heaps = pdata->nr; + + heaps = devm_kzalloc(&pdev->dev, + sizeof(struct ion_heap *) * pdata->nr, + GFP_KERNEL); + + idev = ion_device_create(NULL); + if (IS_ERR_OR_NULL(idev)) + return PTR_ERR(idev); + + /* create the heaps as specified in the board file */ + for (i = 0; i < num_heaps; i++) { + struct ion_platform_heap *heap_data = &pdata->heaps[i]; + + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(idev, heaps[i]); + } + platform_set_drvdata(pdev, idev); + return 0; +err: + for (i = 0; i < num_heaps; ++i) + ion_heap_destroy(heaps[i]); + return err; +} + +static int tegra_ion_remove(struct platform_device *pdev) +{ + struct ion_device *idev = platform_get_drvdata(pdev); + int i; + + ion_device_destroy(idev); + for (i = 0; i < num_heaps; i++) + ion_heap_destroy(heaps[i]); + return 0; +} + +static struct platform_driver ion_driver = { + .probe = tegra_ion_probe, + .remove = tegra_ion_remove, + .driver = { .name = "ion-tegra" } +}; + +module_platform_driver(ion_driver); + diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c new file mode 100644 index 000000000..feafa172b --- /dev/null +++ b/drivers/staging/android/lowmemorykiller.c @@ -0,0 +1,207 @@ +/* drivers/misc/lowmemorykiller.c + * + * The lowmemorykiller driver lets user-space specify a set of memory thresholds + * where processes with a range of oom_score_adj values will get killed. Specify + * the minimum oom_score_adj values in + * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in + * /sys/module/lowmemorykiller/parameters/minfree. Both files take a + * comma-separated list of numbers in ascending order.
+ * + * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and + * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill + * processes with a oom_score_adj value of 8 or higher when the free memory + * drops below 4096 pages and kill processes with a oom_score_adj value of 0 or + * higher when the free memory drops below 1024 pages. + * + * The driver considers memory used for caches to be free, but if a large + * percentage of the cached memory is locked this can be very inaccurate + * and processes may not get killed until the normal oom killer is triggered. + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static uint32_t lowmem_debug_level = 1; +static short lowmem_adj[6] = { + 0, + 1, + 6, + 12, +}; +static int lowmem_adj_size = 4; +static int lowmem_minfree[6] = { + 3 * 512, /* 6MB */ + 2 * 1024, /* 8MB */ + 4 * 1024, /* 16MB */ + 16 * 1024, /* 64MB */ +}; +static int lowmem_minfree_size = 4; + +static unsigned long lowmem_deathpending_timeout; + +#define lowmem_print(level, x...) \ + do { \ + if (lowmem_debug_level >= (level)) \ + pr_info(x); \ + } while (0) + +static unsigned long lowmem_count(struct shrinker *s, + struct shrink_control *sc) +{ + return global_page_state(NR_ACTIVE_ANON) + + global_page_state(NR_ACTIVE_FILE) + + global_page_state(NR_INACTIVE_ANON) + + global_page_state(NR_INACTIVE_FILE); +} + +static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) +{ + struct task_struct *tsk; + struct task_struct *selected = NULL; + unsigned long rem = 0; + int tasksize; + int i; + short min_score_adj = OOM_SCORE_ADJ_MAX + 1; + int selected_tasksize = 0; + short selected_oom_score_adj; + int array_size = ARRAY_SIZE(lowmem_adj); + int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages; + int other_file = global_page_state(NR_FILE_PAGES) - + global_page_state(NR_SHMEM) - + total_swapcache_pages(); + + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + if (lowmem_minfree_size < array_size) + array_size = lowmem_minfree_size; + for (i = 0; i < array_size; i++) { + if (other_free < lowmem_minfree[i] && + other_file < lowmem_minfree[i]) { + min_score_adj = lowmem_adj[i]; + break; + } + } + + lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n", + sc->nr_to_scan, sc->gfp_mask, other_free, + other_file, min_score_adj); + + if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) { + lowmem_print(5, "lowmem_scan %lu, %x, return 0\n", + sc->nr_to_scan, sc->gfp_mask); + return 0; + } + + selected_oom_score_adj = min_score_adj; + + rcu_read_lock(); + for_each_process(tsk) { + struct task_struct *p; + short oom_score_adj; + + if (tsk->flags & PF_KTHREAD) + continue; + + p = find_lock_task_mm(tsk); + if (!p) + continue; + + if (test_tsk_thread_flag(p, TIF_MEMDIE) && + time_before_eq(jiffies, lowmem_deathpending_timeout)) { + task_unlock(p); + rcu_read_unlock(); + return 0; + } + oom_score_adj = 
p->signal->oom_score_adj; + if (oom_score_adj < min_score_adj) { + task_unlock(p); + continue; + } + tasksize = get_mm_rss(p->mm); + task_unlock(p); + if (tasksize <= 0) + continue; + if (selected) { + if (oom_score_adj < selected_oom_score_adj) + continue; + if (oom_score_adj == selected_oom_score_adj && + tasksize <= selected_tasksize) + continue; + } + selected = p; + selected_tasksize = tasksize; + selected_oom_score_adj = oom_score_adj; + lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n", + p->pid, p->comm, oom_score_adj, tasksize); + } + if (selected) { + lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n", + selected->pid, selected->comm, + selected_oom_score_adj, selected_tasksize); + lowmem_deathpending_timeout = jiffies + HZ; + /* + * FIXME: lowmemorykiller shouldn't abuse global OOM killer + * infrastructure. There is no real reason why the selected + * task should have access to the memory reserves. + */ + mark_tsk_oom_victim(selected); + send_sig(SIGKILL, selected, 0); + rem += selected_tasksize; + } + + lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n", + sc->nr_to_scan, sc->gfp_mask, rem); + rcu_read_unlock(); + return rem; +} + +static struct shrinker lowmem_shrinker = { + .scan_objects = lowmem_scan, + .count_objects = lowmem_count, + .seeks = DEFAULT_SEEKS * 16 +}; + +static int __init lowmem_init(void) +{ + register_shrinker(&lowmem_shrinker); + return 0; +} + +static void __exit lowmem_exit(void) +{ + unregister_shrinker(&lowmem_shrinker); +} + +module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); +module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, + S_IRUGO | S_IWUSR); +module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, + S_IRUGO | S_IWUSR); +module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); + +module_init(lowmem_init); +module_exit(lowmem_exit); + +MODULE_LICENSE("GPL"); + diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c new file mode 100644 index 000000000..c90838d36 --- /dev/null +++ b/drivers/staging/android/sw_sync.c @@ -0,0 +1,267 @@ +/* + * drivers/base/sw_sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sw_sync.h" + +static int sw_sync_cmp(u32 a, u32 b) +{ + if (a == b) + return 0; + + return ((s32)a - (s32)b) < 0 ? 
-1 : 1; +} + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) +{ + struct sw_sync_pt *pt; + + pt = (struct sw_sync_pt *) + sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt)); + + pt->value = value; + + return (struct sync_pt *)pt; +} +EXPORT_SYMBOL(sw_sync_pt_create); + +static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt_parent(sync_pt); + + return (struct sync_pt *)sw_sync_pt_create(obj, pt->value); +} + +static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt_parent(sync_pt); + + return sw_sync_cmp(obj->value, pt->value) >= 0; +} + +static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b) +{ + struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a; + struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b; + + return sw_sync_cmp(pt_a->value, pt_b->value); +} + +static int sw_sync_fill_driver_data(struct sync_pt *sync_pt, + void *data, int size) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + + if (size < sizeof(pt->value)) + return -ENOMEM; + + memcpy(data, &pt->value, sizeof(pt->value)); + + return sizeof(pt->value); +} + +static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline, + char *str, int size) +{ + struct sw_sync_timeline *timeline = + (struct sw_sync_timeline *)sync_timeline; + snprintf(str, size, "%d", timeline->value); +} + +static void sw_sync_pt_value_str(struct sync_pt *sync_pt, + char *str, int size) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + + snprintf(str, size, "%d", pt->value); +} + +static struct sync_timeline_ops sw_sync_timeline_ops = { + .driver_name = "sw_sync", + .dup = sw_sync_pt_dup, + .has_signaled = sw_sync_pt_has_signaled, + .compare = sw_sync_pt_compare, + .fill_driver_data = sw_sync_fill_driver_data, + .timeline_value_str = sw_sync_timeline_value_str, + .pt_value_str = sw_sync_pt_value_str, +}; + +struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ + struct sw_sync_timeline *obj = (struct sw_sync_timeline *) + sync_timeline_create(&sw_sync_timeline_ops, + sizeof(struct sw_sync_timeline), + name); + + return obj; +} +EXPORT_SYMBOL(sw_sync_timeline_create); + +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ + obj->value += inc; + + sync_timeline_signal(&obj->obj); +} +EXPORT_SYMBOL(sw_sync_timeline_inc); + +#ifdef CONFIG_SW_SYNC_USER +/* *WARNING* + * + * improper use of this can result in deadlocking kernel drivers from userspace. 
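+ *
+ * For reference, the intended flow is (an illustrative userspace sketch
+ * against the uapi/sw_sync.h definitions in this patch; error handling
+ * omitted):
+ *
+ *	struct sw_sync_create_fence_data data = { .value = 1 };
+ *	__u32 inc = 1;
+ *	int tl = open("/dev/sw_sync", O_RDWR);
+ *
+ *	strcpy(data.name, "example-pt");
+ *	ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data);
+ *	ioctl(tl, SW_SYNC_IOC_INC, &inc);
+ *	close(data.fence);
+ *	close(tl);
+ *
+ * The fd returned in data.fence signals once the timeline counter reaches
+ * the fence's value, i.e. after the increment above moves it from 0 to 1.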
+ */ + +/* opening sw_sync create a new sync obj */ +static int sw_sync_open(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj; + char task_comm[TASK_COMM_LEN]; + + get_task_comm(task_comm, current); + + obj = sw_sync_timeline_create(task_comm); + if (obj == NULL) + return -ENOMEM; + + file->private_data = obj; + + return 0; +} + +static int sw_sync_release(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj = file->private_data; + + sync_timeline_destroy(&obj->obj); + return 0; +} + +static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, + unsigned long arg) +{ + int fd = get_unused_fd_flags(O_CLOEXEC); + int err; + struct sync_pt *pt; + struct sync_fence *fence; + struct sw_sync_create_fence_data data; + + if (fd < 0) + return fd; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { + err = -EFAULT; + goto err; + } + + pt = sw_sync_pt_create(obj, data.value); + if (pt == NULL) { + err = -ENOMEM; + goto err; + } + + data.name[sizeof(data.name) - 1] = '\0'; + fence = sync_fence_create(data.name, pt); + if (fence == NULL) { + sync_pt_free(pt); + err = -ENOMEM; + goto err; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + sync_fence_put(fence); + err = -EFAULT; + goto err; + } + + sync_fence_install(fence, fd); + + return 0; + +err: + put_unused_fd(fd); + return err; +} + +static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg) +{ + u32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + sw_sync_timeline_inc(obj, value); + + return 0; +} + +static long sw_sync_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct sw_sync_timeline *obj = file->private_data; + + switch (cmd) { + case SW_SYNC_IOC_CREATE_FENCE: + return sw_sync_ioctl_create_fence(obj, arg); + + case SW_SYNC_IOC_INC: + return sw_sync_ioctl_inc(obj, arg); + + default: + return -ENOTTY; + } +} + +static const struct file_operations sw_sync_fops = { + .owner = THIS_MODULE, + .open = sw_sync_open, + .release = sw_sync_release, + .unlocked_ioctl = sw_sync_ioctl, + .compat_ioctl = sw_sync_ioctl, +}; + +static struct miscdevice sw_sync_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sw_sync", + .fops = &sw_sync_fops, +}; + +static int __init sw_sync_device_init(void) +{ + return misc_register(&sw_sync_dev); +} + +static void __exit sw_sync_device_remove(void) +{ + misc_deregister(&sw_sync_dev); +} + +module_init(sw_sync_device_init); +module_exit(sw_sync_device_remove); + +#endif /* CONFIG_SW_SYNC_USER */ diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h new file mode 100644 index 000000000..c87ae9ebf --- /dev/null +++ b/drivers/staging/android/sw_sync.h @@ -0,0 +1,59 @@ +/* + * include/linux/sw_sync.h + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_SW_SYNC_H +#define _LINUX_SW_SYNC_H + +#include <linux/types.h> +#include <linux/kconfig.h> +#include "sync.h" +#include "uapi/sw_sync.h" + +struct sw_sync_timeline { + struct sync_timeline obj; + + u32 value; +}; + +struct sw_sync_pt { + struct sync_pt pt; + + u32 value; +}; + +#if IS_ENABLED(CONFIG_SW_SYNC) +struct sw_sync_timeline *sw_sync_timeline_create(const char *name); +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); +#else +static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ + return NULL; +} + +static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ +} + +static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, + u32 value) +{ + return NULL; +} +#endif /* IS_ENABLED(CONFIG_SW_SYNC) */ + +#endif /* _LINUX_SW_SYNC_H */ diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c new file mode 100644 index 000000000..f83e00c78 --- /dev/null +++ b/drivers/staging/android/sync.c @@ -0,0 +1,729 @@ +/* + * drivers/base/sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/poll.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/anon_inodes.h> + +#include "sync.h" + +#define CREATE_TRACE_POINTS +#include "trace/sync.h" + +static const struct fence_ops android_fence_ops; +static const struct file_operations sync_fence_fops; + +struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, + int size, const char *name) +{ + struct sync_timeline *obj; + + if (size < sizeof(struct sync_timeline)) + return NULL; + + obj = kzalloc(size, GFP_KERNEL); + if (obj == NULL) + return NULL; + + kref_init(&obj->kref); + obj->ops = ops; + obj->context = fence_context_alloc(1); + strlcpy(obj->name, name, sizeof(obj->name)); + + INIT_LIST_HEAD(&obj->child_list_head); + INIT_LIST_HEAD(&obj->active_list_head); + spin_lock_init(&obj->child_list_lock); + + sync_timeline_debug_add(obj); + + return obj; +} +EXPORT_SYMBOL(sync_timeline_create); + +static void sync_timeline_free(struct kref *kref) +{ + struct sync_timeline *obj = + container_of(kref, struct sync_timeline, kref); + + sync_timeline_debug_remove(obj); + + if (obj->ops->release_obj) + obj->ops->release_obj(obj); + + kfree(obj); +} + +static void sync_timeline_get(struct sync_timeline *obj) +{ + kref_get(&obj->kref); +} + +static void sync_timeline_put(struct sync_timeline *obj) +{ + kref_put(&obj->kref, sync_timeline_free); +} + +void sync_timeline_destroy(struct sync_timeline *obj) +{ + obj->destroyed = true; + /* + * Ensure timeline is marked as destroyed before + * changing timeline's fences status. + */ + smp_wmb(); + + /* + * signal any children that their parent is going away.
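+ * The timeline's own reference is dropped just below; any sync_pts that
+ * are still alive keep the object pinned via the reference taken in
+ * sync_pt_create() until android_fence_release() puts it.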
+ */ + sync_timeline_signal(obj); + sync_timeline_put(obj); +} +EXPORT_SYMBOL(sync_timeline_destroy); + +void sync_timeline_signal(struct sync_timeline *obj) +{ + unsigned long flags; + LIST_HEAD(signaled_pts); + struct sync_pt *pt, *next; + + trace_sync_timeline(obj); + + spin_lock_irqsave(&obj->child_list_lock, flags); + + list_for_each_entry_safe(pt, next, &obj->active_list_head, + active_list) { + if (fence_is_signaled_locked(&pt->base)) + list_del_init(&pt->active_list); + } + + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} +EXPORT_SYMBOL(sync_timeline_signal); + +struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size) +{ + unsigned long flags; + struct sync_pt *pt; + + if (size < sizeof(struct sync_pt)) + return NULL; + + pt = kzalloc(size, GFP_KERNEL); + if (pt == NULL) + return NULL; + + spin_lock_irqsave(&obj->child_list_lock, flags); + sync_timeline_get(obj); + fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock, + obj->context, ++obj->value); + list_add_tail(&pt->child_list, &obj->child_list_head); + INIT_LIST_HEAD(&pt->active_list); + spin_unlock_irqrestore(&obj->child_list_lock, flags); + return pt; +} +EXPORT_SYMBOL(sync_pt_create); + +void sync_pt_free(struct sync_pt *pt) +{ + fence_put(&pt->base); +} +EXPORT_SYMBOL(sync_pt_free); + +static struct sync_fence *sync_fence_alloc(int size, const char *name) +{ + struct sync_fence *fence; + + fence = kzalloc(size, GFP_KERNEL); + if (fence == NULL) + return NULL; + + fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, + fence, 0); + if (IS_ERR(fence->file)) + goto err; + + kref_init(&fence->kref); + strlcpy(fence->name, name, sizeof(fence->name)); + + init_waitqueue_head(&fence->wq); + + return fence; + +err: + kfree(fence); + return NULL; +} + +static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) +{ + struct sync_fence_cb *check; + struct sync_fence *fence; + + check = container_of(cb, struct sync_fence_cb, cb); + fence = check->fence; + + if (atomic_dec_and_test(&fence->status)) + wake_up_all(&fence->wq); +} + +/* TODO: implement a create which takes more that one sync_pt */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) +{ + struct sync_fence *fence; + + fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name); + if (fence == NULL) + return NULL; + + fence->num_fences = 1; + atomic_set(&fence->status, 1); + + fence->cbs[0].sync_pt = &pt->base; + fence->cbs[0].fence = fence; + if (fence_add_callback(&pt->base, &fence->cbs[0].cb, + fence_check_cb_func)) + atomic_dec(&fence->status); + + sync_fence_debug_add(fence); + + return fence; +} +EXPORT_SYMBOL(sync_fence_create); + +struct sync_fence *sync_fence_fdget(int fd) +{ + struct file *file = fget(fd); + + if (file == NULL) + return NULL; + + if (file->f_op != &sync_fence_fops) + goto err; + + return file->private_data; + +err: + fput(file); + return NULL; +} +EXPORT_SYMBOL(sync_fence_fdget); + +void sync_fence_put(struct sync_fence *fence) +{ + fput(fence->file); +} +EXPORT_SYMBOL(sync_fence_put); + +void sync_fence_install(struct sync_fence *fence, int fd) +{ + fd_install(fd, fence->file); +} +EXPORT_SYMBOL(sync_fence_install); + +static void sync_fence_add_pt(struct sync_fence *fence, + int *i, struct fence *pt) +{ + fence->cbs[*i].sync_pt = pt; + fence->cbs[*i].fence = fence; + + if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) { + fence_get(pt); + (*i)++; + } +} + +struct sync_fence *sync_fence_merge(const char *name, + struct sync_fence *a, struct 
+
+struct sync_fence *sync_fence_merge(const char *name,
+                                    struct sync_fence *a, struct sync_fence *b)
+{
+        int num_fences = a->num_fences + b->num_fences;
+        struct sync_fence *fence;
+        int i, i_a, i_b;
+        unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);
+
+        fence = sync_fence_alloc(size, name);
+        if (fence == NULL)
+                return NULL;
+
+        atomic_set(&fence->status, num_fences);
+
+        /*
+         * Assume sync_fence a and b are both ordered and have no
+         * duplicates with the same context.
+         *
+         * If a sync_fence can only be created with sync_fence_merge
+         * and sync_fence_create, this is a reasonable assumption.
+         */
+        for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
+                struct fence *pt_a = a->cbs[i_a].sync_pt;
+                struct fence *pt_b = b->cbs[i_b].sync_pt;
+
+                if (pt_a->context < pt_b->context) {
+                        sync_fence_add_pt(fence, &i, pt_a);
+
+                        i_a++;
+                } else if (pt_a->context > pt_b->context) {
+                        sync_fence_add_pt(fence, &i, pt_b);
+
+                        i_b++;
+                } else {
+                        if (pt_a->seqno - pt_b->seqno <= INT_MAX)
+                                sync_fence_add_pt(fence, &i, pt_a);
+                        else
+                                sync_fence_add_pt(fence, &i, pt_b);
+
+                        i_a++;
+                        i_b++;
+                }
+        }
+
+        for (; i_a < a->num_fences; i_a++)
+                sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);
+
+        for (; i_b < b->num_fences; i_b++)
+                sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);
+
+        if (num_fences > i)
+                atomic_sub(num_fences - i, &fence->status);
+        fence->num_fences = i;
+
+        sync_fence_debug_add(fence);
+        return fence;
+}
+EXPORT_SYMBOL(sync_fence_merge);
+
+int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
+                          int wake_flags, void *key)
+{
+        struct sync_fence_waiter *wait;
+
+        wait = container_of(curr, struct sync_fence_waiter, work);
+        list_del_init(&wait->work.task_list);
+
+        wait->callback(wait->work.private, wait);
+        return 1;
+}
+
+int sync_fence_wait_async(struct sync_fence *fence,
+                          struct sync_fence_waiter *waiter)
+{
+        int err = atomic_read(&fence->status);
+        unsigned long flags;
+
+        if (err < 0)
+                return err;
+
+        if (!err)
+                return 1;
+
+        init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
+        waiter->work.private = fence;
+
+        spin_lock_irqsave(&fence->wq.lock, flags);
+        err = atomic_read(&fence->status);
+        if (err > 0)
+                __add_wait_queue_tail(&fence->wq, &waiter->work);
+        spin_unlock_irqrestore(&fence->wq.lock, flags);
+
+        if (err < 0)
+                return err;
+
+        return !err;
+}
+EXPORT_SYMBOL(sync_fence_wait_async);
+
+int sync_fence_cancel_async(struct sync_fence *fence,
+                            struct sync_fence_waiter *waiter)
+{
+        unsigned long flags;
+        int ret = 0;
+
+        spin_lock_irqsave(&fence->wq.lock, flags);
+        if (!list_empty(&waiter->work.task_list))
+                list_del_init(&waiter->work.task_list);
+        else
+                ret = -ENOENT;
+        spin_unlock_irqrestore(&fence->wq.lock, flags);
+        return ret;
+}
+EXPORT_SYMBOL(sync_fence_cancel_async);
+
+int sync_fence_wait(struct sync_fence *fence, long timeout)
+{
+        long ret;
+        int i;
+
+        if (timeout < 0)
+                timeout = MAX_SCHEDULE_TIMEOUT;
+        else
+                timeout = msecs_to_jiffies(timeout);
+
+        trace_sync_wait(fence, 1);
+        for (i = 0; i < fence->num_fences; ++i)
+                trace_sync_pt(fence->cbs[i].sync_pt);
+        ret = wait_event_interruptible_timeout(fence->wq,
+                                               atomic_read(&fence->status) <= 0,
+                                               timeout);
+        trace_sync_wait(fence, 0);
+
+        if (ret < 0) {
+                return ret;
+        } else if (ret == 0) {
+                if (timeout) {
+                        pr_info("fence timeout on [%p] after %dms\n", fence,
+                                jiffies_to_msecs(timeout));
+                        sync_dump();
+                }
+                return -ETIME;
+        }
+
+        ret = atomic_read(&fence->status);
+        if (ret) {
+                pr_info("fence error %ld on [%p]\n", ret, fence);
+                sync_dump();
+        }
+        return ret;
+}
+EXPORT_SYMBOL(sync_fence_wait);
+
+static const char *android_fence_get_driver_name(struct fence *fence)
+{
+        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+        struct sync_timeline *parent = sync_pt_parent(pt);
+
+        return parent->ops->driver_name;
+}
+
+static const char *android_fence_get_timeline_name(struct fence *fence)
+{
+        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+        struct sync_timeline *parent = sync_pt_parent(pt);
+
+        return parent->name;
+}
+
+static void android_fence_release(struct fence *fence)
+{
+        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+        struct sync_timeline *parent = sync_pt_parent(pt);
+        unsigned long flags;
+
+        spin_lock_irqsave(fence->lock, flags);
+        list_del(&pt->child_list);
+        if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
+                list_del(&pt->active_list);
+        spin_unlock_irqrestore(fence->lock, flags);
+
+        if (parent->ops->free_pt)
+                parent->ops->free_pt(pt);
+
+        sync_timeline_put(parent);
+        fence_free(&pt->base);
+}
+
+static bool android_fence_signaled(struct fence *fence)
+{
+        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+        struct sync_timeline *parent = sync_pt_parent(pt);
+        int ret;
+
+        ret = parent->ops->has_signaled(pt);
+        if (ret < 0)
+                fence->status = ret;
+        return ret;
+}
+
+static bool android_fence_enable_signaling(struct fence *fence)
+{
+        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+        struct sync_timeline *parent = sync_pt_parent(pt);
+
+        if (android_fence_signaled(fence))
+                return false;
+
+        list_add_tail(&pt->active_list, &parent->active_list_head);
+        return true;
+}
+
+static int android_fence_fill_driver_data(struct fence *fence,
+                                          void *data, int size)
+{
+        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+        struct sync_timeline *parent = sync_pt_parent(pt);
+
+        if (!parent->ops->fill_driver_data)
+                return 0;
+        return parent->ops->fill_driver_data(pt, data, size);
+}
+
+static void android_fence_value_str(struct fence *fence,
+                                    char *str, int size)
+{
+        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+        struct sync_timeline *parent = sync_pt_parent(pt);
+
+        if (!parent->ops->pt_value_str) {
+                if (size)
+                        *str = 0;
+                return;
+        }
+        parent->ops->pt_value_str(pt, str, size);
+}
+
+static void android_fence_timeline_value_str(struct fence *fence,
+                                             char *str, int size)
+{
+        struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+        struct sync_timeline *parent = sync_pt_parent(pt);
+
+        if (!parent->ops->timeline_value_str) {
+                if (size)
+                        *str = 0;
+                return;
+        }
+        parent->ops->timeline_value_str(parent, str, size);
+}
+
+static const struct fence_ops android_fence_ops = {
+        .get_driver_name = android_fence_get_driver_name,
+        .get_timeline_name = android_fence_get_timeline_name,
+        .enable_signaling = android_fence_enable_signaling,
+        .signaled = android_fence_signaled,
+        .wait = fence_default_wait,
+        .release = android_fence_release,
+        .fill_driver_data = android_fence_fill_driver_data,
+        .fence_value_str = android_fence_value_str,
+        .timeline_value_str = android_fence_timeline_value_str,
+};
+
+static void sync_fence_free(struct kref *kref)
+{
+        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+        int i, status = atomic_read(&fence->status);
+
+        for (i = 0; i < fence->num_fences; ++i) {
+                if (status)
+                        fence_remove_callback(fence->cbs[i].sync_pt,
+                                              &fence->cbs[i].cb);
+                fence_put(fence->cbs[i].sync_pt);
+        }
+
+        kfree(fence);
+}
+
+static int sync_fence_release(struct inode *inode, struct file *file)
+{
+        struct sync_fence *fence = file->private_data;
+
+        sync_fence_debug_remove(fence);
+
+        kref_put(&fence->kref, sync_fence_free);
+        return 0;
+}
+
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
+{
+        struct sync_fence *fence = file->private_data;
+        int status;
+
+        poll_wait(file, &fence->wq, wait);
+
+        status = atomic_read(&fence->status);
+
+        if (!status)
+                return POLLIN;
+        else if (status < 0)
+                return POLLERR;
+        return 0;
+}
+
+static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
+{
+        __s32 value;
+
+        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+                return -EFAULT;
+
+        return sync_fence_wait(fence, value);
+}
+
+static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
+{
+        int fd = get_unused_fd_flags(O_CLOEXEC);
+        int err;
+        struct sync_fence *fence2, *fence3;
+        struct sync_merge_data data;
+
+        if (fd < 0)
+                return fd;
+
+        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+                err = -EFAULT;
+                goto err_put_fd;
+        }
+
+        fence2 = sync_fence_fdget(data.fd2);
+        if (fence2 == NULL) {
+                err = -ENOENT;
+                goto err_put_fd;
+        }
+
+        data.name[sizeof(data.name) - 1] = '\0';
+        fence3 = sync_fence_merge(data.name, fence, fence2);
+        if (fence3 == NULL) {
+                err = -ENOMEM;
+                goto err_put_fence2;
+        }
+
+        data.fence = fd;
+        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+                err = -EFAULT;
+                goto err_put_fence3;
+        }
+
+        sync_fence_install(fence3, fd);
+        sync_fence_put(fence2);
+        return 0;
+
+err_put_fence3:
+        sync_fence_put(fence3);
+
+err_put_fence2:
+        sync_fence_put(fence2);
+
+err_put_fd:
+        put_unused_fd(fd);
+        return err;
+}
+
+static int sync_fill_pt_info(struct fence *fence, void *data, int size)
+{
+        struct sync_pt_info *info = data;
+        int ret;
+
+        if (size < sizeof(struct sync_pt_info))
+                return -ENOMEM;
+
+        info->len = sizeof(struct sync_pt_info);
+
+        if (fence->ops->fill_driver_data) {
+                ret = fence->ops->fill_driver_data(fence, info->driver_data,
+                                                   size - sizeof(*info));
+                if (ret < 0)
+                        return ret;
+
+                info->len += ret;
+        }
+
+        strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
+                sizeof(info->obj_name));
+        strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
+                sizeof(info->driver_name));
+        if (fence_is_signaled(fence))
+                info->status = fence->status >= 0 ? 1 : fence->status;
+        else
+                info->status = 0;
+        info->timestamp_ns = ktime_to_ns(fence->timestamp);
+
+        return info->len;
+}
+
+static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
+                                        unsigned long arg)
+{
+        struct sync_fence_info_data *data;
+        __u32 size;
+        __u32 len = 0;
+        int ret, i;
+
+        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
+                return -EFAULT;
+
+        if (size < sizeof(struct sync_fence_info_data))
+                return -EINVAL;
+
+        if (size > 4096)
+                size = 4096;
+
+        data = kzalloc(size, GFP_KERNEL);
+        if (data == NULL)
+                return -ENOMEM;
+
+        strlcpy(data->name, fence->name, sizeof(data->name));
+        data->status = atomic_read(&fence->status);
+        if (data->status >= 0)
+                data->status = !data->status;
+
+        len = sizeof(struct sync_fence_info_data);
+
+        for (i = 0; i < fence->num_fences; ++i) {
+                struct fence *pt = fence->cbs[i].sync_pt;
+
+                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
+
+                if (ret < 0)
+                        goto out;
+
+                len += ret;
+        }
+
+        data->len = len;
+
+        if (copy_to_user((void __user *)arg, data, len))
+                ret = -EFAULT;
+        else
+                ret = 0;
+
+out:
+        kfree(data);
+
+        return ret;
+}
+
+static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+                             unsigned long arg)
+{
+        struct sync_fence *fence = file->private_data;
+
+        switch (cmd) {
+        case SYNC_IOC_WAIT:
+                return sync_fence_ioctl_wait(fence, arg);
+
+        case SYNC_IOC_MERGE:
+                return sync_fence_ioctl_merge(fence, arg);
+
+        case SYNC_IOC_FENCE_INFO:
+                return sync_fence_ioctl_fence_info(fence, arg);
+
+        default:
+                return -ENOTTY;
+        }
+}
+
+static const struct file_operations sync_fence_fops = {
+        .release = sync_fence_release,
+        .poll = sync_fence_poll,
+        .unlocked_ioctl = sync_fence_ioctl,
+        .compat_ioctl = sync_fence_ioctl,
+};
+
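Because sync_fence_poll() reports POLLIN once the fence has signaled and POLLERR on error, userspace can wait on a fence fd with plain poll(2) as an alternative to SYNC_IOC_WAIT. A minimal userspace sketch:

        #include <poll.h>

        /* Wait up to 100 ms for a sync fence fd to signal (sketch;
         * error handling reduced to a single return code). */
        static int wait_fence_poll(int fence_fd)
        {
                struct pollfd pfd = { .fd = fence_fd, .events = POLLIN };
                int n = poll(&pfd, 1, 100);

                if (n > 0 && (pfd.revents & POLLIN))
                        return 0;       /* signaled */
                return -1;              /* timeout, POLLERR, or poll() error */
        }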
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
new file mode 100644
index 000000000..a21b79fb4
--- /dev/null
+++ b/drivers/staging/android/sync.h
@@ -0,0 +1,356 @@
+/*
+ * drivers/staging/android/sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/fence.h>
+
+#include "uapi/sync.h"
+
+struct sync_timeline;
+struct sync_pt;
+struct sync_fence;
+
+/**
+ * struct sync_timeline_ops - sync object implementation ops
+ * @driver_name:	name of the implementation
+ * @dup:		duplicate a sync_pt
+ * @has_signaled:	returns:
+ *			  1 if pt has signaled
+ *			  0 if pt has not signaled
+ *			 <0 on error
+ * @compare:		returns:
+ *			  1 if b will signal before a
+ *			  0 if a and b will signal at the same time
+ *			 -1 if a will signal before b
+ * @free_pt:		called before sync_pt is freed
+ * @release_obj:	called before sync_timeline is freed
+ * @fill_driver_data:	write implementation specific driver data to data.
+ *			should return an error if there is not enough room
+ *			as specified by size. This information is returned
+ *			to userspace by SYNC_IOC_FENCE_INFO.
+ * @timeline_value_str: fill str with the value of the sync_timeline's counter
+ * @pt_value_str:	fill str with the value of the sync_pt
+ */
+struct sync_timeline_ops {
+        const char *driver_name;
+
+        /* required */
+        struct sync_pt * (*dup)(struct sync_pt *pt);
+
+        /* required */
+        int (*has_signaled)(struct sync_pt *pt);
+
+        /* required */
+        int (*compare)(struct sync_pt *a, struct sync_pt *b);
+
+        /* optional */
+        void (*free_pt)(struct sync_pt *sync_pt);
+
+        /* optional */
+        void (*release_obj)(struct sync_timeline *sync_timeline);
+
+        /* optional */
+        int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
+
+        /* optional */
+        void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
+                                   int size);
+
+        /* optional */
+        void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
+};
+
+/**
+ * struct sync_timeline - sync object
+ * @kref:		reference count on the timeline.
+ * @ops:		ops that define the implementation of the sync_timeline
+ * @name:		name of the sync_timeline. Useful for debugging
+ * @destroyed:		set when sync_timeline is destroyed
+ * @context:		fence context used for sync_pts on this timeline
+ * @value:		value of the last signaled sync_pt
+ * @child_list_head:	list of children sync_pts for this sync_timeline
+ * @child_list_lock:	lock protecting @child_list_head, destroyed, and
+ *			sync_pt.status
+ * @active_list_head:	list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list:	membership in global sync_timeline_list
+ */
+struct sync_timeline {
+        struct kref             kref;
+        const struct sync_timeline_ops *ops;
+        char                    name[32];
+
+        /* protected by child_list_lock */
+        bool                    destroyed;
+        int                     context, value;
+
+        struct list_head        child_list_head;
+        spinlock_t              child_list_lock;
+
+        struct list_head        active_list_head;
+
+#ifdef CONFIG_DEBUG_FS
+        struct list_head        sync_timeline_list;
+#endif
+};
+
+/**
+ * struct sync_pt - sync point
+ * @base:		base fence object
+ * @child_list:		membership in sync_timeline.child_list_head
+ * @active_list:	membership in sync_timeline.active_list_head
+ */
+struct sync_pt {
+        struct fence base;
+
+        struct list_head        child_list;
+        struct list_head        active_list;
+};
+
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+        return container_of(pt->base.lock, struct sync_timeline,
+                            child_list_lock);
+}
+
+struct sync_fence_cb {
+        struct fence_cb cb;
+        struct fence *sync_pt;
+        struct sync_fence *fence;
+};
+
+/**
+ * struct sync_fence - sync fence
+ * @file:		file representing this fence
+ * @kref:		reference count on fence.
+ * @name:		name of sync_fence. Useful for debugging
+ * @num_fences:		number of sync_pts in the fence; immutable once the
+ *			fence is created
+ * @status:		0: signaled, >0:active, <0: error
+ *
+ * @wq:			wait queue for fence signaling
+ * @sync_fence_list:	membership in global fence list
+ * @cbs:		one callback struct per sync_pt in the fence
+ */
+struct sync_fence {
+        struct file             *file;
+        struct kref             kref;
+        char                    name[32];
+#ifdef CONFIG_DEBUG_FS
+        struct list_head        sync_fence_list;
+#endif
+        int num_fences;
+
+        wait_queue_head_t       wq;
+        atomic_t                status;
+
+        struct sync_fence_cb    cbs[];
+};
+
+struct sync_fence_waiter;
+typedef void (*sync_callback_t)(struct sync_fence *fence,
+                                struct sync_fence_waiter *waiter);
+
+/**
+ * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
+ * @work:	wait queue entry used to queue this waiter on the fence
+ * @callback:	function pointer to call when fence signals
+ */
+struct sync_fence_waiter {
+        wait_queue_t work;
+        sync_callback_t callback;
+};
+
+static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
+                                          sync_callback_t callback)
+{
+        INIT_LIST_HEAD(&waiter->work.task_list);
+        waiter->callback = callback;
+}
+
+/*
+ * API for sync_timeline implementers
+ */
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @ops:	specifies the implementation ops for the object
+ * @size:	size to allocate for this obj
+ * @name:	sync_timeline name
+ *
+ * Creates a new sync_timeline which will use the implementation specified by
+ * @ops. @size bytes will be allocated allowing for implementation specific
+ * data to be kept after the generic sync_timeline struct.
+ */
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+                                           int size, const char *name);
+
+/**
+ * sync_timeline_destroy() - destroys a sync object
+ * @obj:	sync_timeline to destroy
+ *
+ * A sync implementation should call this when the @obj is going away
+ * (i.e. module unload.) @obj won't actually be freed until all its children
+ * sync_pts are freed.
+ */
+void sync_timeline_destroy(struct sync_timeline *obj);
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj:	sync_timeline to signal
+ *
+ * A sync implementation should call this any time one of its sync_pts
+ * has signaled or has an error condition.
+ */
+void sync_timeline_signal(struct sync_timeline *obj);
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @parent:	sync_pt's parent sync_timeline
+ * @size:	size to allocate for this pt
+ *
+ * Creates a new sync_pt as a child of @parent. @size bytes will be
+ * allocated allowing for implementation specific data to be kept after
+ * the generic sync_timeline struct.
+ */
+struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
+
+/**
+ * sync_pt_free() - frees a sync pt
+ * @pt:		sync_pt to free
+ *
+ * This should only be called on sync_pts which have been created but
+ * not added to a fence.
+ */
+void sync_pt_free(struct sync_pt *pt);
+
+/**
+ * sync_fence_create() - creates a sync fence
+ * @name:	name of fence to create
+ * @pt:		sync_pt to add to the fence
+ *
+ * Creates a fence containing @pt. Once this is called, the fence takes
+ * ownership of @pt.
+ */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
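Taken together with the sw_sync helpers declared earlier in this patch, the implementer API composes as in the following sketch (kernel side, inside some driver path; assumes CONFIG_SW_SYNC, error handling elided):

        struct sw_sync_timeline *tl = sw_sync_timeline_create("example");
        struct sync_pt *pt = sw_sync_pt_create(tl, 1);
        struct sync_fence *fence = sync_fence_create("example_fence", pt);

        /* the fence now owns pt; it signals once the timeline reaches 1: */
        sw_sync_timeline_inc(tl, 1);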
+
+/*
+ * API for sync_fence consumers
+ */
+
+/**
+ * sync_fence_merge() - merge two fences
+ * @name:	name of new fence
+ * @a:		fence a
+ * @b:		fence b
+ *
+ * Creates a new fence which contains copies of all the sync_pts in both
+ * @a and @b. @a and @b remain valid, independent fences.
+ */
+struct sync_fence *sync_fence_merge(const char *name,
+                                    struct sync_fence *a, struct sync_fence *b);
+
+/**
+ * sync_fence_fdget() - get a fence from an fd
+ * @fd:		fd referencing a fence
+ *
+ * Ensures @fd references a valid fence, increments the refcount of the backing
+ * file, and returns the fence.
+ */
+struct sync_fence *sync_fence_fdget(int fd);
+
+/**
+ * sync_fence_put() - puts a reference of a sync fence
+ * @fence:	fence to put
+ *
+ * Puts a reference on @fence. If this is the last reference, the fence and
+ * all its sync_pts will be freed
+ */
+void sync_fence_put(struct sync_fence *fence);
+
+/**
+ * sync_fence_install() - installs a fence into a file descriptor
+ * @fence:	fence to install
+ * @fd:		file descriptor in which to install the fence
+ *
+ * Installs @fence into @fd. @fd's should be acquired through
+ * get_unused_fd_flags(O_CLOEXEC).
+ */
+void sync_fence_install(struct sync_fence *fence, int fd);
+
+/**
+ * sync_fence_wait_async() - registers an async wait on the fence
+ * @fence:	fence to wait on
+ * @waiter:	waiter callback struct
+ *
+ * Returns 1 if @fence has already signaled.
+ *
+ * Registers a callback to be called when @fence signals or has an error.
+ * @waiter should be initialized with sync_fence_waiter_init().
+ */
+int sync_fence_wait_async(struct sync_fence *fence,
+                          struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_cancel_async() - cancels an async wait
+ * @fence:	fence to wait on
+ * @waiter:	waiter callback struct
+ *
+ * Returns 0 if waiter was removed from fence's async waiter list.
+ * Returns -ENOENT if waiter was not found on fence's async waiter list.
+ *
+ * Cancels a previously registered async wait. Will fail gracefully if
+ * @waiter was never registered or if @fence has already signaled @waiter.
+ */
+int sync_fence_cancel_async(struct sync_fence *fence,
+                            struct sync_fence_waiter *waiter);
+
+/**
+ * sync_fence_wait() - wait on fence
+ * @fence:	fence to wait on
+ * @timeout:	timeout in ms
+ *
+ * Wait for @fence to be signaled or have an error. Waits indefinitely
+ * if @timeout < 0
+ */
+int sync_fence_wait(struct sync_fence *fence, long timeout);
+
+#ifdef CONFIG_DEBUG_FS
+
+extern void sync_timeline_debug_add(struct sync_timeline *obj);
+extern void sync_timeline_debug_remove(struct sync_timeline *obj);
+extern void sync_fence_debug_add(struct sync_fence *fence);
+extern void sync_fence_debug_remove(struct sync_fence *fence);
+extern void sync_dump(void);
+
+#else
+# define sync_timeline_debug_add(obj)
+# define sync_timeline_debug_remove(obj)
+# define sync_fence_debug_add(fence)
+# define sync_fence_debug_remove(fence)
+# define sync_dump()
+#endif
+int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
+                          int wake_flags, void *key);
+
+#endif /* _LINUX_SYNC_H */
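For reference, a counter-based implementation of the required ops might look like the following sketch, reusing the sw_sync types declared earlier (hypothetical code, not part of this patch; the required .dup op is elided):

        static int my_has_signaled(struct sync_pt *pt)
        {
                struct sw_sync_pt *p = container_of(pt, struct sw_sync_pt, pt);
                struct sw_sync_timeline *tl =
                        container_of(sync_pt_parent(pt),
                                     struct sw_sync_timeline, obj);

                return p->value <= tl->value;   /* 1 = signaled, 0 = pending */
        }

        static int my_compare(struct sync_pt *a, struct sync_pt *b)
        {
                u32 va = container_of(a, struct sw_sync_pt, pt)->value;
                u32 vb = container_of(b, struct sw_sync_pt, pt)->value;

                /* -1: a signals first, 1: b signals first, 0: together */
                return (va == vb) ? 0 : ((va < vb) ? -1 : 1);
        }

        static const struct sync_timeline_ops my_timeline_ops = {
                .driver_name    = "my_timeline",
                .has_signaled   = my_has_signaled,
                .compare        = my_compare,
                /* .dup is also required; elided in this sketch */
        };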
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
new file mode 100644
index 000000000..91ed2c4cf
--- /dev/null
+++ b/drivers/staging/android/sync_debug.c
@@ -0,0 +1,254 @@
+/*
+ * drivers/staging/android/sync_debug.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+#include <linux/time64.h>
+#include "sync.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+static LIST_HEAD(sync_fence_list_head);
+static DEFINE_SPINLOCK(sync_fence_list_lock);
+
+void sync_timeline_debug_add(struct sync_timeline *obj)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&sync_timeline_list_lock, flags);
+        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_timeline_debug_remove(struct sync_timeline *obj)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&sync_timeline_list_lock, flags);
+        list_del(&obj->sync_timeline_list);
+        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_fence_debug_add(struct sync_fence *fence)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&sync_fence_list_lock, flags);
+        list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
+        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+}
+
+void sync_fence_debug_remove(struct sync_fence *fence)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&sync_fence_list_lock, flags);
+        list_del(&fence->sync_fence_list);
+        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+}
+
+static const char *sync_status_str(int status)
+{
+        if (status == 0)
+                return "signaled";
+
+        if (status > 0)
+                return "active";
+
+        return "error";
+}
+
+static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
+{
+        int status = 1;
+        struct sync_timeline *parent = sync_pt_parent(pt);
+
+        if (fence_is_signaled_locked(&pt->base))
+                status = pt->base.status;
+
+        seq_printf(s, "  %s%spt %s",
+                   fence ? parent->name : "",
+                   fence ? "_" : "",
+                   sync_status_str(status));
+
+        if (status <= 0) {
+                struct timespec64 ts64 =
+                        ktime_to_timespec64(pt->base.timestamp);
+
+                seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
+        }
+
+        if (parent->ops->timeline_value_str &&
+            parent->ops->pt_value_str) {
+                char value[64];
+
+                parent->ops->pt_value_str(pt, value, sizeof(value));
+                seq_printf(s, ": %s", value);
+                if (fence) {
+                        parent->ops->timeline_value_str(parent, value,
+                                                        sizeof(value));
+                        seq_printf(s, " / %s", value);
+                }
+        }
+
+        seq_puts(s, "\n");
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+        struct list_head *pos;
+        unsigned long flags;
+
+        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
+
+        if (obj->ops->timeline_value_str) {
+                char value[64];
+
+                obj->ops->timeline_value_str(obj, value, sizeof(value));
+                seq_printf(s, ": %s", value);
+        }
+
+        seq_puts(s, "\n");
+
+        spin_lock_irqsave(&obj->child_list_lock, flags);
+        list_for_each(pos, &obj->child_list_head) {
+                struct sync_pt *pt =
+                        container_of(pos, struct sync_pt, child_list);
+                sync_print_pt(s, pt, false);
+        }
+        spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
+{
+        wait_queue_t *pos;
+        unsigned long flags;
+        int i;
+
+        seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
+                   sync_status_str(atomic_read(&fence->status)));
+
+        for (i = 0; i < fence->num_fences; ++i) {
+                struct sync_pt *pt =
+                        container_of(fence->cbs[i].sync_pt,
+                                     struct sync_pt, base);
+
+                sync_print_pt(s, pt, true);
+        }
+
+        spin_lock_irqsave(&fence->wq.lock, flags);
+        list_for_each_entry(pos, &fence->wq.task_list, task_list) {
+                struct sync_fence_waiter *waiter;
+
+                if (pos->func != &sync_fence_wake_up_wq)
+                        continue;
+
+                waiter = container_of(pos, struct sync_fence_waiter, work);
+
+                seq_printf(s, "waiter %pF\n", waiter->callback);
+        }
+        spin_unlock_irqrestore(&fence->wq.lock, flags);
+}
+
+static int sync_debugfs_show(struct seq_file *s, void *unused)
+{
+        unsigned long flags;
+        struct list_head *pos;
+
+        seq_puts(s, "objs:\n--------------\n");
+
+        spin_lock_irqsave(&sync_timeline_list_lock, flags);
+        list_for_each(pos, &sync_timeline_list_head) {
+                struct sync_timeline *obj =
+                        container_of(pos, struct sync_timeline,
+                                     sync_timeline_list);
+
+                sync_print_obj(s, obj);
+                seq_puts(s, "\n");
+        }
+        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+        seq_puts(s, "fences:\n--------------\n");
+
+        spin_lock_irqsave(&sync_fence_list_lock, flags);
+        list_for_each(pos, &sync_fence_list_head) {
+                struct sync_fence *fence =
+                        container_of(pos, struct sync_fence, sync_fence_list);
+
+                sync_print_fence(s, fence);
+                seq_puts(s, "\n");
+        }
+        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+        return 0;
+}
+
+static int sync_debugfs_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_debugfs_fops = {
+        .open           = sync_debugfs_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+        return 0;
+}
+late_initcall(sync_debugfs_init);
+
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+void sync_dump(void)
+{
+        struct seq_file s = {
+                .buf = sync_dump_buf,
+                .size = sizeof(sync_dump_buf) - 1,
+        };
+        int i;
+
+        sync_debugfs_show(&s, NULL);
+
+        for (i = 0; i < s.count; i += DUMP_CHUNK) {
+                if ((s.count - i) > DUMP_CHUNK) {
+                        char c = s.buf[i + DUMP_CHUNK];
+
+                        s.buf[i + DUMP_CHUNK] = 0;
+                        pr_cont("%s", s.buf + i);
+                        s.buf[i + DUMP_CHUNK] = c;
+                } else {
+                        s.buf[s.count] = 0;
+                        pr_cont("%s", s.buf + i);
+                }
+        }
+}
+
+#endif
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
new file mode 100644
index 000000000..938a35cd9
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.c
@@ -0,0 +1,168 @@
+/* drivers/staging/android/timed_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/ktime.h>
+
+#include "timed_output.h"
+#include "timed_gpio.h"
+
+
+struct timed_gpio_data {
+        struct timed_output_dev dev;
+        struct hrtimer timer;
+        spinlock_t lock;
+        unsigned gpio;
+        int max_timeout;
+        u8 active_low;
+};
+
+static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
+{
+        struct timed_gpio_data *data =
+                container_of(timer, struct timed_gpio_data, timer);
+
+        gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
+        return HRTIMER_NORESTART;
+}
+
+static int gpio_get_time(struct timed_output_dev *dev)
+{
+        struct timed_gpio_data *data;
+        ktime_t t;
+
+        data = container_of(dev, struct timed_gpio_data, dev);
+
+        if (!hrtimer_active(&data->timer))
+                return 0;
+
+        t = hrtimer_get_remaining(&data->timer);
+
+        return ktime_to_ms(t);
+}
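gpio_enable() below arms the hrtimer by splitting the millisecond timeout into seconds and nanoseconds; a worked instance of that arithmetic (illustrative only):

        /* value = 2500 ms: 2500 / 1000 = 2 s,
         * (2500 % 1000) * 1000000 = 500000000 ns */
        hrtimer_start(&data->timer, ktime_set(2, 500000000), HRTIMER_MODE_REL);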
+
+static void gpio_enable(struct timed_output_dev *dev, int value)
+{
+        struct timed_gpio_data *data =
+                container_of(dev, struct timed_gpio_data, dev);
+        unsigned long flags;
+
+        spin_lock_irqsave(&data->lock, flags);
+
+        /* cancel previous timer and set GPIO according to value */
+        hrtimer_cancel(&data->timer);
+        gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
+
+        if (value > 0) {
+                if (value > data->max_timeout)
+                        value = data->max_timeout;
+
+                hrtimer_start(&data->timer,
+                              ktime_set(value / 1000, (value % 1000) * 1000000),
+                              HRTIMER_MODE_REL);
+        }
+
+        spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static int timed_gpio_probe(struct platform_device *pdev)
+{
+        struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+        struct timed_gpio *cur_gpio;
+        struct timed_gpio_data *gpio_data, *gpio_dat;
+        int i, ret;
+
+        if (!pdata)
+                return -EBUSY;
+
+        gpio_data = devm_kzalloc(&pdev->dev,
+                                 sizeof(struct timed_gpio_data) * pdata->num_gpios,
+                                 GFP_KERNEL);
+        if (!gpio_data)
+                return -ENOMEM;
+
+        for (i = 0; i < pdata->num_gpios; i++) {
+                cur_gpio = &pdata->gpios[i];
+                gpio_dat = &gpio_data[i];
+
+                hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
+                             HRTIMER_MODE_REL);
+                gpio_dat->timer.function = gpio_timer_func;
+                spin_lock_init(&gpio_dat->lock);
+
+                gpio_dat->dev.name = cur_gpio->name;
+                gpio_dat->dev.get_time = gpio_get_time;
+                gpio_dat->dev.enable = gpio_enable;
+                ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
+                if (ret < 0)
+                        goto err_out;
+                ret = timed_output_dev_register(&gpio_dat->dev);
+                if (ret < 0) {
+                        gpio_free(cur_gpio->gpio);
+                        goto err_out;
+                }
+
+                gpio_dat->gpio = cur_gpio->gpio;
+                gpio_dat->max_timeout = cur_gpio->max_timeout;
+                gpio_dat->active_low = cur_gpio->active_low;
+                gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
+        }
+
+        platform_set_drvdata(pdev, gpio_data);
+
+        return 0;
+
+err_out:
+        while (--i >= 0) {
+                timed_output_dev_unregister(&gpio_data[i].dev);
+                gpio_free(gpio_data[i].gpio);
+        }
+
+        return ret;
+}
+
+static int timed_gpio_remove(struct platform_device *pdev)
+{
+        struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+        struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
+        int i;
+
+        for (i = 0; i < pdata->num_gpios; i++) {
+                timed_output_dev_unregister(&gpio_data[i].dev);
+                gpio_free(gpio_data[i].gpio);
+        }
+
+        return 0;
+}
+
+static struct platform_driver timed_gpio_driver = {
+        .probe = timed_gpio_probe,
+        .remove = timed_gpio_remove,
+        .driver = {
+                .name = TIMED_GPIO_NAME,
+        },
+};
+
+module_platform_driver(timed_gpio_driver);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed gpio driver");
+MODULE_LICENSE("GPL");
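The driver binds against board-supplied platform data. A sketch of a board file registering one vibrator line (names and GPIO number are illustrative; struct timed_gpio and TIMED_GPIO_NAME come from timed_gpio.h just below):

        static struct timed_gpio vibrator_gpio = {
                .name           = "vibrator",
                .gpio           = 128,          /* example GPIO number */
                .max_timeout    = 15000,        /* ms */
                .active_low     = 0,
        };

        static struct timed_gpio_platform_data vibrator_pdata = {
                .num_gpios      = 1,
                .gpios          = &vibrator_gpio,
        };

        static struct platform_device vibrator_device = {
                .name   = TIMED_GPIO_NAME,      /* "timed-gpio" */
                .id     = -1,
                .dev    = { .platform_data = &vibrator_pdata },
        };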
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
new file mode 100644
index 000000000..d29e169d7
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.h
@@ -0,0 +1,33 @@
+/* drivers/staging/android/timed_gpio.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_TIMED_GPIO_H
+#define _LINUX_TIMED_GPIO_H
+
+#define TIMED_GPIO_NAME "timed-gpio"
+
+struct timed_gpio {
+        const char *name;
+        unsigned gpio;
+        int max_timeout;
+        u8 active_low;
+};
+
+struct timed_gpio_platform_data {
+        int num_gpios;
+        struct timed_gpio *gpios;
+};
+
+#endif
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
new file mode 100644
index 000000000..b41429f37
--- /dev/null
+++ b/drivers/staging/android/timed_output.c
@@ -0,0 +1,120 @@
+/* drivers/staging/android/timed_output.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "timed_output: " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+
+#include "timed_output.h"
+
+static struct class *timed_output_class;
+static atomic_t device_count;
+
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
+{
+        struct timed_output_dev *tdev = dev_get_drvdata(dev);
+        int remaining = tdev->get_time(tdev);
+
+        return sprintf(buf, "%d\n", remaining);
+}
+
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t size)
+{
+        struct timed_output_dev *tdev = dev_get_drvdata(dev);
+        int value;
+        int rc;
+
+        rc = kstrtoint(buf, 0, &value);
+        if (rc != 0)
+                return -EINVAL;
+
+        tdev->enable(tdev, value);
+
+        return size;
+}
+static DEVICE_ATTR_RW(enable);
+
+static struct attribute *timed_output_attrs[] = {
+        &dev_attr_enable.attr,
+        NULL,
+};
+ATTRIBUTE_GROUPS(timed_output);
+
+static int create_timed_output_class(void)
+{
+        if (!timed_output_class) {
+                timed_output_class = class_create(THIS_MODULE, "timed_output");
+                if (IS_ERR(timed_output_class))
+                        return PTR_ERR(timed_output_class);
+                atomic_set(&device_count, 0);
+                timed_output_class->dev_groups = timed_output_groups;
+        }
+
+        return 0;
+}
+
+int timed_output_dev_register(struct timed_output_dev *tdev)
+{
+        int ret;
+
+        if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
+                return -EINVAL;
+
+        ret = create_timed_output_class();
+        if (ret < 0)
+                return ret;
+
+        tdev->index = atomic_inc_return(&device_count);
+        tdev->dev = device_create(timed_output_class, NULL,
+                                  MKDEV(0, tdev->index), NULL, "%s", tdev->name);
+        if (IS_ERR(tdev->dev))
+                return PTR_ERR(tdev->dev);
+
+        dev_set_drvdata(tdev->dev, tdev);
+        tdev->state = 0;
+        return 0;
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_register);
+
+void timed_output_dev_unregister(struct timed_output_dev *tdev)
+{
+        tdev->enable(tdev, 0);
+        device_destroy(timed_output_class, MKDEV(0, tdev->index));
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
+
+static int __init timed_output_init(void)
+{
+        return create_timed_output_class();
+}
+
+static void __exit timed_output_exit(void)
+{
+        class_destroy(timed_output_class);
+}
+
+module_init(timed_output_init);
+module_exit(timed_output_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed output class driver");
+MODULE_LICENSE("GPL");
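Any driver can expose itself through this class by filling in a timed_output_dev and registering it; userspace then drives it through sysfs. A sketch (the my_vibrator_* names are hypothetical; the interface is declared in timed_output.h below):

        static void my_vibrator_enable(struct timed_output_dev *dev, int timeout)
        {
                /* start the motor, arrange for it to stop after `timeout` ms */
        }

        static int my_vibrator_get_time(struct timed_output_dev *dev)
        {
                return 0;       /* ms remaining; 0 when idle */
        }

        static struct timed_output_dev my_vibrator = {
                .name           = "vibrator",
                .enable         = my_vibrator_enable,
                .get_time       = my_vibrator_get_time,
        };

        /* in probe: timed_output_dev_register(&my_vibrator);
         * userspace writes a duration in ms to
         * /sys/class/timed_output/vibrator/enable and reads back the
         * remaining time from the same file. */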
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
new file mode 100644
index 000000000..13d2ca51c
--- /dev/null
+++ b/drivers/staging/android/timed_output.h
@@ -0,0 +1,37 @@
+/* drivers/staging/android/timed_output.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_TIMED_OUTPUT_H
+#define _LINUX_TIMED_OUTPUT_H
+
+struct timed_output_dev {
+        const char      *name;
+
+        /* enable the output and set the timer */
+        void (*enable)(struct timed_output_dev *sdev, int timeout);
+
+        /* returns the current number of milliseconds remaining on the timer */
+        int (*get_time)(struct timed_output_dev *sdev);
+
+        /* private data */
+        struct device   *dev;
+        int             index;
+        int             state;
+};
+
+int timed_output_dev_register(struct timed_output_dev *dev);
+void timed_output_dev_unregister(struct timed_output_dev *dev);
+
+#endif
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
new file mode 100644
index 000000000..77edb977a
--- /dev/null
+++ b/drivers/staging/android/trace/sync.h
@@ -0,0 +1,82 @@
+#undef TRACE_SYSTEM
+#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
+#define TRACE_SYSTEM sync
+
+#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SYNC_H
+
+#include "../sync.h"
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sync_timeline,
+        TP_PROTO(struct sync_timeline *timeline),
+
+        TP_ARGS(timeline),
+
+        TP_STRUCT__entry(
+                        __string(name, timeline->name)
+                        __array(char, value, 32)
+        ),
+
+        TP_fast_assign(
+                        __assign_str(name, timeline->name);
+                        if (timeline->ops->timeline_value_str) {
+                                timeline->ops->timeline_value_str(timeline,
+                                                        __entry->value,
+                                                        sizeof(__entry->value));
+                        } else {
+                                __entry->value[0] = '\0';
+                        }
+        ),
+
+        TP_printk("name=%s value=%s", __get_str(name), __entry->value)
+);
+
+TRACE_EVENT(sync_wait,
+        TP_PROTO(struct sync_fence *fence, int begin),
+
+        TP_ARGS(fence, begin),
+
+        TP_STRUCT__entry(
+                        __string(name, fence->name)
+                        __field(s32, status)
+                        __field(u32, begin)
+        ),
+
+        TP_fast_assign(
+                        __assign_str(name, fence->name);
+                        __entry->status = atomic_read(&fence->status);
+                        __entry->begin = begin;
+        ),
+
+        TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end",
+                        __get_str(name), __entry->status)
+);
+
+TRACE_EVENT(sync_pt,
+        TP_PROTO(struct fence *pt),
+
+        TP_ARGS(pt),
+
+        TP_STRUCT__entry(
+                __string(timeline, pt->ops->get_timeline_name(pt))
+                __array(char, value, 32)
+        ),
+
+        TP_fast_assign(
+                __assign_str(timeline, pt->ops->get_timeline_name(pt));
+                if (pt->ops->fence_value_str) {
+                        pt->ops->fence_value_str(pt, __entry->value,
+                                                 sizeof(__entry->value));
+                } else {
+                        __entry->value[0] = '\0';
+                }
+        ),
+
+        TP_printk("name=%s value=%s", __get_str(timeline), __entry->value)
+);
+
+#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
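These tracepoints are emitted by the sync core earlier in this patch rather than by drivers directly; the call pattern in sync.c is:

        /* from sync_fence_wait() in sync.c above: */
        trace_sync_wait(fence, 1);                      /* "begin name=... state=..." */
        for (i = 0; i < fence->num_fences; ++i)
                trace_sync_pt(fence->cbs[i].sync_pt);   /* "name=<timeline> value=..." */

sync_timeline_signal() likewise begins with trace_sync_timeline(obj); the TP_printk() formats above determine exactly what each event prints in the trace buffer.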
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
new file mode 100644
index 000000000..ba4743c71
--- /dev/null
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/staging/android/uapi/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN		256
+
+#define ASHMEM_NAME_DEF		"dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED	0
+#define ASHMEM_WAS_PURGED	1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED	0
+#define ASHMEM_IS_PINNED	1
+
+struct ashmem_pin {
+        __u32 offset;	/* offset into region, in bytes, page-aligned */
+        __u32 len;	/* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC		0x77
+
+#define ASHMEM_SET_NAME		_IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME		_IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE		_IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK	_IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN		_IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN		_IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS	_IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES	_IO(__ASHMEMIOC, 10)
+
+#endif	/* _UAPI_LINUX_ASHMEM_H */
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
new file mode 100644
index 000000000..6aa495673
--- /dev/null
+++ b/drivers/staging/android/uapi/ion.h
@@ -0,0 +1,196 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_types - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
+ *				 carveout heap, allocations are physically
+ *				 contiguous
+ * @ION_HEAP_TYPE_CHUNK:	 memory allocated from a chunk heap
+ * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
+ * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask
+ *				 is used to identify the heaps, so only 32
+ *				 total heap types are supported
+ */
+enum ion_heap_type {
+        ION_HEAP_TYPE_SYSTEM,
+        ION_HEAP_TYPE_SYSTEM_CONTIG,
+        ION_HEAP_TYPE_CARVEOUT,
+        ION_HEAP_TYPE_CHUNK,
+        ION_HEAP_TYPE_DMA,
+        ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
+                                 are at the end of this enum */
+        ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK		(1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS		(sizeof(unsigned int) * 8)
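Because heaps are selected by bit mask, one allocation request can permit several heaps at once; for example, allowing either the carveout or the system heap (a sketch using the masks above):

        unsigned int heap_id_mask = ION_HEAP_CARVEOUT_MASK | ION_HEAP_SYSTEM_MASK;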
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1		/* mappings of this buffer should be
+					   cached, ion will do cache
+					   maintenance when the buffer is
+					   mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2	/* mappings of this buffer will be
+					   created at mmap time, if this is set
+					   caches must be managed manually */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * create a client by opening /dev/ion
+ * most operations handled via following ioctls
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:		size of the allocation
+ * @align:		required alignment of the allocation
+ * @heap_id_mask:	mask of heap ids to allocate from
+ * @flags:		flags passed to heap
+ * @handle:		pointer that will be populated with a cookie to use to
+ *			refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+        size_t len;
+        size_t align;
+        unsigned int heap_id_mask;
+        unsigned int flags;
+        ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle:	a handle
+ * @fd:		a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+        ion_user_handle_t handle;
+        int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle:	a handle
+ */
+struct ion_handle_data {
+        ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd:	the custom ioctl function to call
+ * @arg:	additional data to pass to the custom ioctl, typically a user
+ *		pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+        unsigned int cmd;
+        unsigned long arg;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
+                                      struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
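Putting ION_IOC_ALLOC, ION_IOC_MAP, and ION_IOC_FREE together, a userspace client allocates, maps, and releases a buffer roughly as follows (a sketch; error handling elided):

        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>

        static void *ion_alloc_example(void)
        {
                int ion_fd = open("/dev/ion", O_RDWR);
                struct ion_allocation_data alloc = {
                        .len          = 4096,
                        .align        = 4096,
                        .heap_id_mask = ION_HEAP_SYSTEM_MASK,
                        .flags        = ION_FLAG_CACHED,
                };
                struct ion_fd_data map;
                struct ion_handle_data free_data;

                ioctl(ion_fd, ION_IOC_ALLOC, &alloc);   /* fills alloc.handle */

                map.handle = alloc.handle;
                ioctl(ion_fd, ION_IOC_MAP, &map);       /* fills map.fd */

                free_data.handle = alloc.handle;
                ioctl(ion_fd, ION_IOC_FREE, &free_data); /* map.fd keeps the
                                                            buffer alive */

                return mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                            MAP_SHARED, map.fd, 0);
        }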
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf api's correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary should be used after touching a cached buffer from the cpu,
+ * this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644
index 000000000..ffef06f63
--- /dev/null
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -0,0 +1,70 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read handle
+ * @ptr:	a pointer to an area at least as large as size
+ * @offset:	offset into the ion buffer to start reading
+ * @size:	size to read or write
+ * @write:	1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+        __u64 ptr;
+        __u64 offset;
+        __u64 size;
+        int write;
+        int __padding;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+                        _IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping. Can be
+ * used by unit tests to emulate a DMA engine as close as possible. Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+                        _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping. Can be
+ * used by unit tests to test heap map_kernel functions. Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+                        _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
new file mode 100644
index 000000000..9b5d48695
--- /dev/null
+++ b/drivers/staging/android/uapi/sw_sync.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+        __u32	value;
+        char	name[32];
+        __s32	fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC	'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
+                                              struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
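With SW_SYNC_USER enabled, userspace can exercise the whole fence machinery from the timeline device node. A sketch (assumes the sw_sync misc device appears as /dev/sw_sync; error handling elided):

        #include <fcntl.h>
        #include <sys/ioctl.h>

        static void sw_sync_example(void)
        {
                int tl_fd = open("/dev/sw_sync", O_RDWR);
                struct sw_sync_create_fence_data data = {
                        .value = 1,
                        .name  = "test_fence",
                };
                __u32 inc = 1;

                ioctl(tl_fd, SW_SYNC_IOC_CREATE_FENCE, &data);  /* data.fence = fence fd */

                /* data.fence can now be waited on with poll(2) or the
                 * SYNC_IOC_WAIT ioctl defined in uapi/sync.h below. */

                ioctl(tl_fd, SW_SYNC_IOC_INC, &inc);    /* counter hits 1: fence signals */
        }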
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
new file mode 100644
index 000000000..e964c751f
--- /dev/null
+++ b/drivers/staging/android/uapi/sync.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2:	file descriptor of second fence
+ * @name:	name of new fence
+ * @fence:	returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+        __s32	fd2; /* fd of second fence */
+        char	name[32]; /* name of new fence */
+        __s32	fence; /* fd on newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len:		length of sync_pt_info including any driver_data
+ * @obj_name:		name of parent sync_timeline
+ * @driver_name:	name of driver implementing the parent
+ * @status:		status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns:	timestamp of status change in nanoseconds
+ * @driver_data:	any driver dependent data
+ */
+struct sync_pt_info {
+        __u32	len;
+        char	obj_name[32];
+        char	driver_name[32];
+        __s32	status;
+        __u64	timestamp_ns;
+
+        __u8	driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len:	ioctl caller writes the size of the buffer it's passing in.
+ *		ioctl returns length of sync_fence_data returned to userspace
+ *		including pt_info.
+ * @name:	name of fence
+ * @status:	status of fence. 1: signaled 0:active <0:error
+ * @pt_info:	a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+        __u32	len;
+        char	name[32];
+        __s32	status;
+
+        __u8	pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC		'>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * Pass timeout in milliseconds. Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT		_IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE		_IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len. On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO	_IOWR(SYNC_IOC_MAGIC, 2,\
+                                      struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
--
cgit v1.2.3-54-g00ecf