Diffstat (limited to 'drivers/staging/android')
-rw-r--r--  drivers/staging/android/Kconfig                      |   9
-rw-r--r--  drivers/staging/android/ashmem.c                     |  47
-rw-r--r--  drivers/staging/android/ion/hisilicon/hi6220_ion.c   |   5
-rw-r--r--  drivers/staging/android/ion/ion.c                    | 139
-rw-r--r--  drivers/staging/android/ion/ion.h                    |  20
-rw-r--r--  drivers/staging/android/ion/ion_carveout_heap.c      |   8
-rw-r--r--  drivers/staging/android/ion/ion_page_pool.c          |   4
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c        |   2
-rw-r--r--  drivers/staging/android/ion/ion_test.c               |   4
-rw-r--r--  drivers/staging/android/ion/tegra/tegra_ion.c        |   7
-rw-r--r--  drivers/staging/android/lowmemorykiller.c            |  26
-rw-r--r--  drivers/staging/android/sw_sync.c                    | 191
-rw-r--r--  drivers/staging/android/sw_sync.h                    |   8
-rw-r--r--  drivers/staging/android/sync.c                       | 469
-rw-r--r--  drivers/staging/android/sync.h                       | 241
-rw-r--r--  drivers/staging/android/sync_debug.c                 | 221
-rw-r--r--  drivers/staging/android/timed_gpio.c                 |   5
-rw-r--r--  drivers/staging/android/trace/sync.h                 |  44
-rw-r--r--  drivers/staging/android/uapi/ashmem.h                |   1
-rw-r--r--  drivers/staging/android/uapi/sync.h                  |  37
20 files changed, 598 insertions(+), 890 deletions(-)
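
Note on the ion hunks below: the import path is reworked so that ion_import_dma_buf() takes a struct dma_buf directly, with a new ion_import_dma_buf_fd() wrapper doing the dma_buf_get()/dma_buf_put() around it. A minimal sketch of how a kernel-side caller would use the new pair is shown here; example_import() and its surrounding error handling are hypothetical and not part of this patch, only ion_import_dma_buf_fd(), ion_free() and their semantics are taken from the hunks that follow.

    #include <linux/err.h>
    #include "ion.h"

    /* Hypothetical caller: import a dma-buf fd into an ion client,
     * use the buffer, then drop the handle reference. */
    static int example_import(struct ion_client *client, int fd)
    {
    	struct ion_handle *handle;

    	/* New wrapper: resolves the fd, calls ion_import_dma_buf(),
    	 * and puts the dma_buf again before returning. */
    	handle = ion_import_dma_buf_fd(client, fd);
    	if (IS_ERR(handle))
    		return PTR_ERR(handle);

    	/* ... use the buffer ... */

    	ion_free(client, handle);
    	return 0;
    }
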
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 42b15126a..bd90d2002 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -57,15 +57,6 @@ config SW_SYNC synchronization. Useful when there is no hardware primitive backing the synchronization. -config SW_SYNC_USER - bool "Userspace API for SW_SYNC" - default n - depends on SW_SYNC - ---help--- - Provides a user space API to the sw sync object. - *WARNING* improper use of this can result in deadlocking kernel - drivers from userspace. - source "drivers/staging/android/ion/Kconfig" endif # if ANDROID diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index f4f8f0373..2650202d7 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -106,21 +106,34 @@ static struct kmem_cache *ashmem_range_cachep __read_mostly; #define range_on_lru(range) \ ((range)->purged == ASHMEM_NOT_PURGED) -#define page_range_subsumes_range(range, start, end) \ - (((range)->pgstart >= (start)) && ((range)->pgend <= (end))) +static inline int page_range_subsumes_range(struct ashmem_range *range, + size_t start, size_t end) +{ + return (((range)->pgstart >= (start)) && ((range)->pgend <= (end))); +} -#define page_range_subsumed_by_range(range, start, end) \ - (((range)->pgstart <= (start)) && ((range)->pgend >= (end))) +static inline int page_range_subsumed_by_range(struct ashmem_range *range, + size_t start, size_t end) +{ + return (((range)->pgstart <= (start)) && ((range)->pgend >= (end))); +} -#define page_in_range(range, page) \ - (((range)->pgstart <= (page)) && ((range)->pgend >= (page))) +static inline int page_in_range(struct ashmem_range *range, size_t page) +{ + return (((range)->pgstart <= (page)) && ((range)->pgend >= (page))); +} -#define page_range_in_range(range, start, end) \ - (page_in_range(range, start) || page_in_range(range, end) || \ - page_range_subsumes_range(range, start, end)) +static inline int page_range_in_range(struct ashmem_range *range, + size_t start, size_t end) +{ + return (page_in_range(range, start) || page_in_range(range, end) || + page_range_subsumes_range(range, start, end)); +} -#define range_before_page(range, page) \ - ((range)->pgend < (page)) +static inline int range_before_page(struct ashmem_range *range, size_t page) +{ + return ((range)->pgend < (page)); +} #define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) @@ -372,8 +385,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) } /* requested protection bits must match our allowed protection mask */ - if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & - calc_vm_prot_bits(PROT_MASK))) { + if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) & + calc_vm_prot_bits(PROT_MASK, 0))) { ret = -EPERM; goto out; } @@ -441,7 +454,9 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) if (!(sc->gfp_mask & __GFP_FS)) return SHRINK_STOP; - mutex_lock(&ashmem_mutex); + if (!mutex_trylock(&ashmem_mutex)) + return -1; + list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { loff_t start = range->pgstart * PAGE_SIZE; loff_t end = (range->pgend + 1) * PAGE_SIZE; @@ -661,8 +676,8 @@ restart: if (page_range_subsumed_by_range(range, pgstart, pgend)) return 0; if (page_range_in_range(range, pgstart, pgend)) { - pgstart = min_t(size_t, range->pgstart, pgstart); - pgend = max_t(size_t, range->pgend, pgend); + pgstart = min(range->pgstart, pgstart); + pgend = max(range->pgend, pgend); 
purged |= range->purged; range_del(range); goto restart; diff --git a/drivers/staging/android/ion/hisilicon/hi6220_ion.c b/drivers/staging/android/ion/hisilicon/hi6220_ion.c index e3c07b2ba..fe9f0fd21 100644 --- a/drivers/staging/android/ion/hisilicon/hi6220_ion.c +++ b/drivers/staging/android/ion/hisilicon/hi6220_ion.c @@ -214,10 +214,7 @@ static struct platform_driver hi6220_ion_driver = { static int __init hi6220_ion_init(void) { - int ret; - - ret = platform_driver_register(&hi6220_ion_driver); - return ret; + return platform_driver_register(&hi6220_ion_driver); } subsys_initcall(hi6220_ion_init); diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index df560216d..85365672c 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -387,13 +387,22 @@ static void ion_handle_get(struct ion_handle *handle) kref_get(&handle->ref); } -static int ion_handle_put(struct ion_handle *handle) +static int ion_handle_put_nolock(struct ion_handle *handle) +{ + int ret; + + ret = kref_put(&handle->ref, ion_handle_destroy); + + return ret; +} + +int ion_handle_put(struct ion_handle *handle) { struct ion_client *client = handle->client; int ret; mutex_lock(&client->lock); - ret = kref_put(&handle->ref, ion_handle_destroy); + ret = ion_handle_put_nolock(handle); mutex_unlock(&client->lock); return ret; @@ -417,20 +426,30 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client, return ERR_PTR(-EINVAL); } -static struct ion_handle *ion_handle_get_by_id(struct ion_client *client, +static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client, int id) { struct ion_handle *handle; - mutex_lock(&client->lock); handle = idr_find(&client->idr, id); if (handle) ion_handle_get(handle); - mutex_unlock(&client->lock); return handle ? 
handle : ERR_PTR(-EINVAL); } +struct ion_handle *ion_handle_get_by_id(struct ion_client *client, + int id) +{ + struct ion_handle *handle; + + mutex_lock(&client->lock); + handle = ion_handle_get_by_id_nolock(client, id); + mutex_unlock(&client->lock); + + return handle; +} + static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) { @@ -532,22 +551,28 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len, } EXPORT_SYMBOL(ion_alloc); -void ion_free(struct ion_client *client, struct ion_handle *handle) +static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle) { bool valid_handle; BUG_ON(client != handle->client); - mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, handle); if (!valid_handle) { WARN(1, "%s: invalid handle passed to free.\n", __func__); - mutex_unlock(&client->lock); return; } + ion_handle_put_nolock(handle); +} + +void ion_free(struct ion_client *client, struct ion_handle *handle) +{ + BUG_ON(client != handle->client); + + mutex_lock(&client->lock); + ion_free_nolock(client, handle); mutex_unlock(&client->lock); - ion_handle_put(handle); } EXPORT_SYMBOL(ion_free); @@ -677,6 +702,34 @@ void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) } EXPORT_SYMBOL(ion_unmap_kernel); +static struct mutex debugfs_mutex; +static struct rb_root *ion_root_client; +static int is_client_alive(struct ion_client *client) +{ + struct rb_node *node; + struct ion_client *tmp; + struct ion_device *dev; + + node = ion_root_client->rb_node; + dev = container_of(ion_root_client, struct ion_device, clients); + + down_read(&dev->lock); + while (node) { + tmp = rb_entry(node, struct ion_client, node); + if (client < tmp) { + node = node->rb_left; + } else if (client > tmp) { + node = node->rb_right; + } else { + up_read(&dev->lock); + return 1; + } + } + + up_read(&dev->lock); + return 0; +} + static int ion_debug_client_show(struct seq_file *s, void *unused) { struct ion_client *client = s->private; @@ -685,6 +738,14 @@ static int ion_debug_client_show(struct seq_file *s, void *unused) const char *names[ION_NUM_HEAP_IDS] = {NULL}; int i; + mutex_lock(&debugfs_mutex); + if (!is_client_alive(client)) { + seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n", + client); + mutex_unlock(&debugfs_mutex); + return 0; + } + mutex_lock(&client->lock); for (n = rb_first(&client->handles); n; n = rb_next(n)) { struct ion_handle *handle = rb_entry(n, struct ion_handle, @@ -696,6 +757,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused) sizes[id] += handle->buffer->size; } mutex_unlock(&client->lock); + mutex_unlock(&debugfs_mutex); seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); for (i = 0; i < ION_NUM_HEAP_IDS; i++) { @@ -832,6 +894,7 @@ void ion_client_destroy(struct ion_client *client) struct rb_node *n; pr_debug("%s: %d\n", __func__, __LINE__); + mutex_lock(&debugfs_mutex); while ((n = rb_first(&client->handles))) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); @@ -850,6 +913,7 @@ void ion_client_destroy(struct ion_client *client) kfree(client->display_name); kfree(client->name); kfree(client); + mutex_unlock(&debugfs_mutex); } EXPORT_SYMBOL(ion_client_destroy); @@ -1059,8 +1123,7 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, { } -static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, - size_t len, +static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum 
dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; @@ -1078,15 +1141,16 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, return PTR_ERR_OR_ZERO(vaddr); } -static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, - size_t len, - enum dma_data_direction direction) +static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; mutex_lock(&buffer->lock); ion_buffer_kmap_put(buffer); mutex_unlock(&buffer->lock); + + return 0; } static struct dma_buf_ops dma_buf_ops = { @@ -1153,22 +1217,18 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) } EXPORT_SYMBOL(ion_share_dma_buf_fd); -struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) +struct ion_handle *ion_import_dma_buf(struct ion_client *client, + struct dma_buf *dmabuf) { - struct dma_buf *dmabuf; struct ion_buffer *buffer; struct ion_handle *handle; int ret; - dmabuf = dma_buf_get(fd); - if (IS_ERR(dmabuf)) - return ERR_CAST(dmabuf); /* if this memory came from ion */ if (dmabuf->ops != &dma_buf_ops) { pr_err("%s: can not import dmabuf from another exporter\n", __func__); - dma_buf_put(dmabuf); return ERR_PTR(-EINVAL); } buffer = dmabuf->priv; @@ -1196,11 +1256,25 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) } end: - dma_buf_put(dmabuf); return handle; } EXPORT_SYMBOL(ion_import_dma_buf); +struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd) +{ + struct dma_buf *dmabuf; + struct ion_handle *handle; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return ERR_CAST(dmabuf); + + handle = ion_import_dma_buf(client, dmabuf); + dma_buf_put(dmabuf); + return handle; +} +EXPORT_SYMBOL(ion_import_dma_buf_fd); + static int ion_sync_for_device(struct ion_client *client, int fd) { struct dma_buf *dmabuf; @@ -1283,11 +1357,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ion_handle *handle; - handle = ion_handle_get_by_id(client, data.handle.handle); - if (IS_ERR(handle)) + mutex_lock(&client->lock); + handle = ion_handle_get_by_id_nolock(client, data.handle.handle); + if (IS_ERR(handle)) { + mutex_unlock(&client->lock); return PTR_ERR(handle); - ion_free(client, handle); - ion_handle_put(handle); + } + ion_free_nolock(client, handle); + ion_handle_put_nolock(handle); + mutex_unlock(&client->lock); break; } case ION_IOC_SHARE: @@ -1308,7 +1386,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ion_handle *handle; - handle = ion_import_dma_buf(client, data.fd.fd); + handle = ion_import_dma_buf_fd(client, data.fd.fd); if (IS_ERR(handle)) ret = PTR_ERR(handle); else @@ -1405,6 +1483,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused) seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size"); seq_puts(s, "----------------------------------------------------\n"); + mutex_lock(&debugfs_mutex); for (n = rb_first(&dev->clients); n; n = rb_next(n)) { struct ion_client *client = rb_entry(n, struct ion_client, node); @@ -1423,6 +1502,8 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused) client->pid, size); } } + mutex_unlock(&debugfs_mutex); + seq_puts(s, "----------------------------------------------------\n"); seq_puts(s, "orphaned allocations (info is from last known client):\n"); mutex_lock(&dev->buffer_lock); @@ -1474,7 +1555,7 @@ static int debug_shrink_set(void *data, u64 val) 
struct shrink_control sc; int objs; - sc.gfp_mask = -1; + sc.gfp_mask = GFP_HIGHUSER; sc.nr_to_scan = val; if (!val) { @@ -1492,7 +1573,7 @@ static int debug_shrink_get(void *data, u64 *val) struct shrink_control sc; int objs; - sc.gfp_mask = -1; + sc.gfp_mask = GFP_HIGHUSER; sc.nr_to_scan = 0; objs = heap->shrinker.count_objects(&heap->shrinker, &sc); @@ -1607,6 +1688,8 @@ debugfs_done: init_rwsem(&idev->lock); plist_head_init(&idev->heaps); idev->clients = RB_ROOT; + ion_root_client = &idev->clients; + mutex_init(&debugfs_mutex); return idev; } EXPORT_SYMBOL(ion_device_create); diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h index b860c5f57..a1331fc16 100644 --- a/drivers/staging/android/ion/ion.h +++ b/drivers/staging/android/ion/ion.h @@ -192,14 +192,26 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client, int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle); /** - * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle + * ion_import_dma_buf() - get ion_handle from dma-buf + * @client: the client + * @dmabuf: the dma-buf + * + * Get the ion_buffer associated with the dma-buf and return the ion_handle. + * If no ion_handle exists for this buffer, return newly created ion_handle. + * If dma-buf from another exporter is passed, return ERR_PTR(-EINVAL) + */ +struct ion_handle *ion_import_dma_buf(struct ion_client *client, + struct dma_buf *dmabuf); + +/** + * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle * @client: the client * @fd: the dma-buf fd * - * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf, - * import that fd and return a handle representing it. If a dma-buf from + * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf_fd, + * import that fd and return a handle representing it. 
If a dma-buf from * another exporter is passed in this function will return ERR_PTR(-EINVAL) */ -struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd); +struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd); #endif /* _LINUX_ION_H */ diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c index 9156d8238..1fb0d8155 100644 --- a/drivers/staging/android/ion/ion_carveout_heap.c +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -81,7 +81,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap, if (align > PAGE_SIZE) return -EINVAL; - table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; ret = sg_alloc_table(table, 1, GFP_KERNEL); @@ -117,7 +117,7 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer) if (ion_buffer_cached(buffer)) dma_sync_sg_for_device(NULL, table->sgl, table->nents, - DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); ion_carveout_free(heap, paddr, buffer->size); sg_free_table(table); @@ -163,11 +163,11 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) if (ret) return ERR_PTR(ret); - carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); + carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL); if (!carveout_heap) return ERR_PTR(-ENOMEM); - carveout_heap->pool = gen_pool_create(12, -1); + carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1); if (!carveout_heap->pool) { kfree(carveout_heap); return ERR_PTR(-ENOMEM); diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index fd7e23e0c..1fe80165a 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -149,8 +149,8 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) { - struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool), - GFP_KERNEL); + struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) return NULL; pool->high_count = 0; diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index d4c3e5512..b69dfc706 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -27,7 +27,7 @@ #include "ion_priv.h" static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | - __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM; + __GFP_NORETRY) & ~__GFP_RECLAIM; static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN); static const unsigned int orders[] = {8, 4, 0}; static const int num_orders = ARRAY_SIZE(orders); diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c index 58d46893e..83a3af06d 100644 --- a/drivers/staging/android/ion/ion_test.c +++ b/drivers/staging/android/ion/ion_test.c @@ -109,7 +109,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, if (offset > dma_buf->size || size > dma_buf->size - offset) return -EINVAL; - ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir); + ret = dma_buf_begin_cpu_access(dma_buf, dir); if (ret) return ret; @@ -139,7 +139,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, copy_offset = 0; } err: - dma_buf_end_cpu_access(dma_buf, offset, size, dir); + dma_buf_end_cpu_access(dma_buf, 
dir); return ret; } diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c index 4d3c516cc..49e55e5ac 100644 --- a/drivers/staging/android/ion/tegra/tegra_ion.c +++ b/drivers/staging/android/ion/tegra/tegra_ion.c @@ -33,12 +33,11 @@ static int tegra_ion_probe(struct platform_device *pdev) num_heaps = pdata->nr; - heaps = devm_kzalloc(&pdev->dev, - sizeof(struct ion_heap *) * pdata->nr, - GFP_KERNEL); + heaps = devm_kcalloc(&pdev->dev, pdata->nr, + sizeof(struct ion_heap *), GFP_KERNEL); idev = ion_device_create(NULL); - if (IS_ERR_OR_NULL(idev)) + if (IS_ERR(idev)) return PTR_ERR(idev); /* create the heaps as specified in the board file */ diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 8b5a4a82d..2509e5df7 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -50,6 +50,7 @@ static short lowmem_adj[6] = { 6, 12, }; + static int lowmem_adj_size = 4; static int lowmem_minfree[6] = { 3 * 512, /* 6MB */ @@ -57,6 +58,7 @@ static int lowmem_minfree[6] = { 4 * 1024, /* 16MB */ 16 * 1024, /* 64MB */ }; + static int lowmem_minfree_size = 4; static unsigned long lowmem_deathpending_timeout; @@ -84,6 +86,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) int tasksize; int i; short min_score_adj = OOM_SCORE_ADJ_MAX + 1; + int minfree = 0; int selected_tasksize = 0; short selected_oom_score_adj; int array_size = ARRAY_SIZE(lowmem_adj); @@ -97,8 +100,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) if (lowmem_minfree_size < array_size) array_size = lowmem_minfree_size; for (i = 0; i < array_size; i++) { - if (other_free < lowmem_minfree[i] && - other_file < lowmem_minfree[i]) { + minfree = lowmem_minfree[i]; + if (other_free < minfree && other_file < minfree) { min_score_adj = lowmem_adj[i]; break; } @@ -153,8 +156,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) selected = p; selected_tasksize = tasksize; selected_oom_score_adj = oom_score_adj; - lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n", - p->pid, p->comm, oom_score_adj, tasksize); + lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n", + p->comm, p->pid, oom_score_adj, tasksize); } if (selected) { task_lock(selected); @@ -167,9 +170,18 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) if (selected->mm) mark_oom_victim(selected); task_unlock(selected); - lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n", - selected->pid, selected->comm, - selected_oom_score_adj, selected_tasksize); + lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" + " to free %ldkB on behalf of '%s' (%d) because\n" + " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" + " Free memory is %ldkB above reserved\n", + selected->comm, selected->pid, + selected_oom_score_adj, + selected_tasksize * (long)(PAGE_SIZE / 1024), + current->comm, current->pid, + other_file * (long)(PAGE_SIZE / 1024), + minfree * (long)(PAGE_SIZE / 1024), + min_score_adj, + other_free * (long)(PAGE_SIZE / 1024)); lowmem_deathpending_timeout = jiffies + HZ; rem += selected_tasksize; } diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c index c4ff1679e..af39ff58f 100644 --- a/drivers/staging/android/sw_sync.c +++ b/drivers/staging/android/sw_sync.c @@ -25,15 +25,7 @@ #include "sw_sync.h" -static int sw_sync_cmp(u32 a, u32 b) -{ - if (a == b) - 
return 0; - - return ((s32)a - (s32)b) < 0 ? -1 : 1; -} - -struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) +struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) { struct sw_sync_pt *pt; @@ -42,47 +34,17 @@ struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) pt->value = value; - return (struct sync_pt *)pt; + return (struct fence *)pt; } EXPORT_SYMBOL(sw_sync_pt_create); -static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt) -{ - struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; - struct sw_sync_timeline *obj = - (struct sw_sync_timeline *)sync_pt_parent(sync_pt); - - return (struct sync_pt *)sw_sync_pt_create(obj, pt->value); -} - -static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt) +static int sw_sync_fence_has_signaled(struct fence *fence) { - struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_pt *pt = (struct sw_sync_pt *)fence; struct sw_sync_timeline *obj = - (struct sw_sync_timeline *)sync_pt_parent(sync_pt); - - return sw_sync_cmp(obj->value, pt->value) >= 0; -} - -static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b) -{ - struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a; - struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b; - - return sw_sync_cmp(pt_a->value, pt_b->value); -} - -static int sw_sync_fill_driver_data(struct sync_pt *sync_pt, - void *data, int size) -{ - struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; - - if (size < sizeof(pt->value)) - return -ENOMEM; + (struct sw_sync_timeline *)fence_parent(fence); - memcpy(data, &pt->value, sizeof(pt->value)); - - return sizeof(pt->value); + return (pt->value > obj->value) ? 0 : 1; } static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline, @@ -93,22 +55,18 @@ static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline, snprintf(str, size, "%d", timeline->value); } -static void sw_sync_pt_value_str(struct sync_pt *sync_pt, - char *str, int size) +static void sw_sync_fence_value_str(struct fence *fence, char *str, int size) { - struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_pt *pt = (struct sw_sync_pt *)fence; snprintf(str, size, "%d", pt->value); } static struct sync_timeline_ops sw_sync_timeline_ops = { .driver_name = "sw_sync", - .dup = sw_sync_pt_dup, - .has_signaled = sw_sync_pt_has_signaled, - .compare = sw_sync_pt_compare, - .fill_driver_data = sw_sync_fill_driver_data, + .has_signaled = sw_sync_fence_has_signaled, .timeline_value_str = sw_sync_timeline_value_str, - .pt_value_str = sw_sync_pt_value_str, + .fence_value_str = sw_sync_fence_value_str, }; struct sw_sync_timeline *sw_sync_timeline_create(const char *name) @@ -129,132 +87,3 @@ void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) sync_timeline_signal(&obj->obj); } EXPORT_SYMBOL(sw_sync_timeline_inc); - -#ifdef CONFIG_SW_SYNC_USER -/* *WARNING* - * - * improper use of this can result in deadlocking kernel drivers from userspace. 
- */ - -/* opening sw_sync create a new sync obj */ -static int sw_sync_open(struct inode *inode, struct file *file) -{ - struct sw_sync_timeline *obj; - char task_comm[TASK_COMM_LEN]; - - get_task_comm(task_comm, current); - - obj = sw_sync_timeline_create(task_comm); - if (!obj) - return -ENOMEM; - - file->private_data = obj; - - return 0; -} - -static int sw_sync_release(struct inode *inode, struct file *file) -{ - struct sw_sync_timeline *obj = file->private_data; - - sync_timeline_destroy(&obj->obj); - return 0; -} - -static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, - unsigned long arg) -{ - int fd = get_unused_fd_flags(O_CLOEXEC); - int err; - struct sync_pt *pt; - struct sync_fence *fence; - struct sw_sync_create_fence_data data; - - if (fd < 0) - return fd; - - if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { - err = -EFAULT; - goto err; - } - - pt = sw_sync_pt_create(obj, data.value); - if (!pt) { - err = -ENOMEM; - goto err; - } - - data.name[sizeof(data.name) - 1] = '\0'; - fence = sync_fence_create(data.name, pt); - if (!fence) { - sync_pt_free(pt); - err = -ENOMEM; - goto err; - } - - data.fence = fd; - if (copy_to_user((void __user *)arg, &data, sizeof(data))) { - sync_fence_put(fence); - err = -EFAULT; - goto err; - } - - sync_fence_install(fence, fd); - - return 0; - -err: - put_unused_fd(fd); - return err; -} - -static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg) -{ - u32 value; - - if (copy_from_user(&value, (void __user *)arg, sizeof(value))) - return -EFAULT; - - sw_sync_timeline_inc(obj, value); - - return 0; -} - -static long sw_sync_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct sw_sync_timeline *obj = file->private_data; - - switch (cmd) { - case SW_SYNC_IOC_CREATE_FENCE: - return sw_sync_ioctl_create_fence(obj, arg); - - case SW_SYNC_IOC_INC: - return sw_sync_ioctl_inc(obj, arg); - - default: - return -ENOTTY; - } -} - -static const struct file_operations sw_sync_fops = { - .owner = THIS_MODULE, - .open = sw_sync_open, - .release = sw_sync_release, - .unlocked_ioctl = sw_sync_ioctl, - .compat_ioctl = sw_sync_ioctl, -}; - -static struct miscdevice sw_sync_dev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "sw_sync", - .fops = &sw_sync_fops, -}; - -static int __init sw_sync_device_init(void) -{ - return misc_register(&sw_sync_dev); -} -device_initcall(sw_sync_device_init); - -#endif /* CONFIG_SW_SYNC_USER */ diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h index c87ae9ebf..e18667bfb 100644 --- a/drivers/staging/android/sw_sync.h +++ b/drivers/staging/android/sw_sync.h @@ -29,7 +29,7 @@ struct sw_sync_timeline { }; struct sw_sync_pt { - struct sync_pt pt; + struct fence pt; u32 value; }; @@ -38,7 +38,7 @@ struct sw_sync_pt { struct sw_sync_timeline *sw_sync_timeline_create(const char *name); void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); -struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); +struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); #else static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name) { @@ -49,8 +49,8 @@ static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) { } -static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, - u32 value) +static inline struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, + u32 value) { return NULL; } diff --git a/drivers/staging/android/sync.c 
b/drivers/staging/android/sync.c index ed43796b5..3a8f21031 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c @@ -32,7 +32,7 @@ #include "trace/sync.h" static const struct fence_ops android_fence_ops; -static const struct file_operations sync_fence_fops; +static const struct file_operations sync_file_fops; struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, int size, const char *name) @@ -68,9 +68,6 @@ static void sync_timeline_free(struct kref *kref) sync_timeline_debug_remove(obj); - if (obj->ops->release_obj) - obj->ops->release_obj(obj); - kfree(obj); } @@ -93,10 +90,6 @@ void sync_timeline_destroy(struct sync_timeline *obj) */ smp_wmb(); - /* - * signal any children that their parent is going away. - */ - sync_timeline_signal(obj); sync_timeline_put(obj); } EXPORT_SYMBOL(sync_timeline_destroy); @@ -104,126 +97,115 @@ EXPORT_SYMBOL(sync_timeline_destroy); void sync_timeline_signal(struct sync_timeline *obj) { unsigned long flags; - LIST_HEAD(signaled_pts); - struct sync_pt *pt, *next; + struct fence *fence, *next; trace_sync_timeline(obj); spin_lock_irqsave(&obj->child_list_lock, flags); - list_for_each_entry_safe(pt, next, &obj->active_list_head, + list_for_each_entry_safe(fence, next, &obj->active_list_head, active_list) { - if (fence_is_signaled_locked(&pt->base)) - list_del_init(&pt->active_list); + if (fence_is_signaled_locked(fence)) + list_del_init(&fence->active_list); } spin_unlock_irqrestore(&obj->child_list_lock, flags); } EXPORT_SYMBOL(sync_timeline_signal); -struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size) +struct fence *sync_pt_create(struct sync_timeline *obj, int size) { unsigned long flags; - struct sync_pt *pt; + struct fence *fence; - if (size < sizeof(struct sync_pt)) + if (size < sizeof(*fence)) return NULL; - pt = kzalloc(size, GFP_KERNEL); - if (!pt) + fence = kzalloc(size, GFP_KERNEL); + if (!fence) return NULL; spin_lock_irqsave(&obj->child_list_lock, flags); sync_timeline_get(obj); - fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock, + fence_init(fence, &android_fence_ops, &obj->child_list_lock, obj->context, ++obj->value); - list_add_tail(&pt->child_list, &obj->child_list_head); - INIT_LIST_HEAD(&pt->active_list); + list_add_tail(&fence->child_list, &obj->child_list_head); + INIT_LIST_HEAD(&fence->active_list); spin_unlock_irqrestore(&obj->child_list_lock, flags); - return pt; + return fence; } EXPORT_SYMBOL(sync_pt_create); -void sync_pt_free(struct sync_pt *pt) -{ - fence_put(&pt->base); -} -EXPORT_SYMBOL(sync_pt_free); - -static struct sync_fence *sync_fence_alloc(int size, const char *name) +static struct sync_file *sync_file_alloc(int size, const char *name) { - struct sync_fence *fence; + struct sync_file *sync_file; - fence = kzalloc(size, GFP_KERNEL); - if (!fence) + sync_file = kzalloc(size, GFP_KERNEL); + if (!sync_file) return NULL; - fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, - fence, 0); - if (IS_ERR(fence->file)) + sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops, + sync_file, 0); + if (IS_ERR(sync_file->file)) goto err; - kref_init(&fence->kref); - strlcpy(fence->name, name, sizeof(fence->name)); + kref_init(&sync_file->kref); + strlcpy(sync_file->name, name, sizeof(sync_file->name)); - init_waitqueue_head(&fence->wq); + init_waitqueue_head(&sync_file->wq); - return fence; + return sync_file; err: - kfree(fence); + kfree(sync_file); return NULL; } static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) 
{ - struct sync_fence_cb *check; - struct sync_fence *fence; + struct sync_file_cb *check; + struct sync_file *sync_file; - check = container_of(cb, struct sync_fence_cb, cb); - fence = check->fence; + check = container_of(cb, struct sync_file_cb, cb); + sync_file = check->sync_file; - if (atomic_dec_and_test(&fence->status)) - wake_up_all(&fence->wq); + if (atomic_dec_and_test(&sync_file->status)) + wake_up_all(&sync_file->wq); } -/* TODO: implement a create which takes more that one sync_pt */ -struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt) +/* TODO: implement a create which takes more that one fence */ +struct sync_file *sync_file_create(const char *name, struct fence *fence) { - struct sync_fence *fence; + struct sync_file *sync_file; - fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name); - if (!fence) + sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]), + name); + if (!sync_file) return NULL; - fence->num_fences = 1; - atomic_set(&fence->status, 1); + sync_file->num_fences = 1; + atomic_set(&sync_file->status, 1); - fence->cbs[0].sync_pt = pt; - fence->cbs[0].fence = fence; - if (fence_add_callback(pt, &fence->cbs[0].cb, fence_check_cb_func)) - atomic_dec(&fence->status); + sync_file->cbs[0].fence = fence; + sync_file->cbs[0].sync_file = sync_file; + if (fence_add_callback(fence, &sync_file->cbs[0].cb, + fence_check_cb_func)) + atomic_dec(&sync_file->status); - sync_fence_debug_add(fence); + sync_file_debug_add(sync_file); - return fence; + return sync_file; } -EXPORT_SYMBOL(sync_fence_create_dma); +EXPORT_SYMBOL(sync_file_create); -struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) -{ - return sync_fence_create_dma(name, &pt->base); -} -EXPORT_SYMBOL(sync_fence_create); - -struct sync_fence *sync_fence_fdget(int fd) +struct sync_file *sync_file_fdget(int fd) { struct file *file = fget(fd); if (!file) return NULL; - if (file->f_op != &sync_fence_fops) + if (file->f_op != &sync_file_fops) goto err; return file->private_data; @@ -232,70 +214,71 @@ err: fput(file); return NULL; } -EXPORT_SYMBOL(sync_fence_fdget); +EXPORT_SYMBOL(sync_file_fdget); -void sync_fence_put(struct sync_fence *fence) +void sync_file_put(struct sync_file *sync_file) { - fput(fence->file); + fput(sync_file->file); } -EXPORT_SYMBOL(sync_fence_put); +EXPORT_SYMBOL(sync_file_put); -void sync_fence_install(struct sync_fence *fence, int fd) +void sync_file_install(struct sync_file *sync_file, int fd) { - fd_install(fd, fence->file); + fd_install(fd, sync_file->file); } -EXPORT_SYMBOL(sync_fence_install); +EXPORT_SYMBOL(sync_file_install); -static void sync_fence_add_pt(struct sync_fence *fence, - int *i, struct fence *pt) +static void sync_file_add_pt(struct sync_file *sync_file, int *i, + struct fence *fence) { - fence->cbs[*i].sync_pt = pt; - fence->cbs[*i].fence = fence; + sync_file->cbs[*i].fence = fence; + sync_file->cbs[*i].sync_file = sync_file; - if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) { - fence_get(pt); + if (!fence_add_callback(fence, &sync_file->cbs[*i].cb, + fence_check_cb_func)) { + fence_get(fence); (*i)++; } } -struct sync_fence *sync_fence_merge(const char *name, - struct sync_fence *a, struct sync_fence *b) +struct sync_file *sync_file_merge(const char *name, + struct sync_file *a, struct sync_file *b) { int num_fences = a->num_fences + b->num_fences; - struct sync_fence *fence; + struct sync_file *sync_file; int i, i_a, i_b; - unsigned long size = offsetof(struct sync_fence, 
cbs[num_fences]); + unsigned long size = offsetof(struct sync_file, cbs[num_fences]); - fence = sync_fence_alloc(size, name); - if (!fence) + sync_file = sync_file_alloc(size, name); + if (!sync_file) return NULL; - atomic_set(&fence->status, num_fences); + atomic_set(&sync_file->status, num_fences); /* - * Assume sync_fence a and b are both ordered and have no + * Assume sync_file a and b are both ordered and have no * duplicates with the same context. * - * If a sync_fence can only be created with sync_fence_merge - * and sync_fence_create, this is a reasonable assumption. + * If a sync_file can only be created with sync_file_merge + * and sync_file_create, this is a reasonable assumption. */ for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) { - struct fence *pt_a = a->cbs[i_a].sync_pt; - struct fence *pt_b = b->cbs[i_b].sync_pt; + struct fence *pt_a = a->cbs[i_a].fence; + struct fence *pt_b = b->cbs[i_b].fence; if (pt_a->context < pt_b->context) { - sync_fence_add_pt(fence, &i, pt_a); + sync_file_add_pt(sync_file, &i, pt_a); i_a++; } else if (pt_a->context > pt_b->context) { - sync_fence_add_pt(fence, &i, pt_b); + sync_file_add_pt(sync_file, &i, pt_b); i_b++; } else { if (pt_a->seqno - pt_b->seqno <= INT_MAX) - sync_fence_add_pt(fence, &i, pt_a); + sync_file_add_pt(sync_file, &i, pt_a); else - sync_fence_add_pt(fence, &i, pt_b); + sync_file_add_pt(sync_file, &i, pt_b); i_a++; i_b++; @@ -303,156 +286,55 @@ struct sync_fence *sync_fence_merge(const char *name, } for (; i_a < a->num_fences; i_a++) - sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt); + sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence); for (; i_b < b->num_fences; i_b++) - sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt); + sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence); if (num_fences > i) - atomic_sub(num_fences - i, &fence->status); - fence->num_fences = i; - - sync_fence_debug_add(fence); - return fence; -} -EXPORT_SYMBOL(sync_fence_merge); - -int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode, - int wake_flags, void *key) -{ - struct sync_fence_waiter *wait; - - wait = container_of(curr, struct sync_fence_waiter, work); - list_del_init(&wait->work.task_list); - - wait->callback(wait->work.private, wait); - return 1; -} - -int sync_fence_wait_async(struct sync_fence *fence, - struct sync_fence_waiter *waiter) -{ - int err = atomic_read(&fence->status); - unsigned long flags; - - if (err < 0) - return err; - - if (!err) - return 1; - - init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq); - waiter->work.private = fence; - - spin_lock_irqsave(&fence->wq.lock, flags); - err = atomic_read(&fence->status); - if (err > 0) - __add_wait_queue_tail(&fence->wq, &waiter->work); - spin_unlock_irqrestore(&fence->wq.lock, flags); - - if (err < 0) - return err; - - return !err; -} -EXPORT_SYMBOL(sync_fence_wait_async); - -int sync_fence_cancel_async(struct sync_fence *fence, - struct sync_fence_waiter *waiter) -{ - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&fence->wq.lock, flags); - if (!list_empty(&waiter->work.task_list)) - list_del_init(&waiter->work.task_list); - else - ret = -ENOENT; - spin_unlock_irqrestore(&fence->wq.lock, flags); - return ret; -} -EXPORT_SYMBOL(sync_fence_cancel_async); - -int sync_fence_wait(struct sync_fence *fence, long timeout) -{ - long ret; - int i; + atomic_sub(num_fences - i, &sync_file->status); + sync_file->num_fences = i; - if (timeout < 0) - timeout = MAX_SCHEDULE_TIMEOUT; - else - timeout = msecs_to_jiffies(timeout); - - 
trace_sync_wait(fence, 1); - for (i = 0; i < fence->num_fences; ++i) - trace_sync_pt(fence->cbs[i].sync_pt); - ret = wait_event_interruptible_timeout(fence->wq, - atomic_read(&fence->status) <= 0, - timeout); - trace_sync_wait(fence, 0); - - if (ret < 0) { - return ret; - } else if (ret == 0) { - if (timeout) { - pr_info("fence timeout on [%p] after %dms\n", fence, - jiffies_to_msecs(timeout)); - sync_dump(); - } - return -ETIME; - } - - ret = atomic_read(&fence->status); - if (ret) { - pr_info("fence error %ld on [%p]\n", ret, fence); - sync_dump(); - } - return ret; + sync_file_debug_add(sync_file); + return sync_file; } -EXPORT_SYMBOL(sync_fence_wait); +EXPORT_SYMBOL(sync_file_merge); static const char *android_fence_get_driver_name(struct fence *fence) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); return parent->ops->driver_name; } static const char *android_fence_get_timeline_name(struct fence *fence) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); return parent->name; } static void android_fence_release(struct fence *fence) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); unsigned long flags; spin_lock_irqsave(fence->lock, flags); - list_del(&pt->child_list); - if (WARN_ON_ONCE(!list_empty(&pt->active_list))) - list_del(&pt->active_list); + list_del(&fence->child_list); + if (WARN_ON_ONCE(!list_empty(&fence->active_list))) + list_del(&fence->active_list); spin_unlock_irqrestore(fence->lock, flags); - if (parent->ops->free_pt) - parent->ops->free_pt(pt); - sync_timeline_put(parent); - fence_free(&pt->base); + fence_free(fence); } static bool android_fence_signaled(struct fence *fence) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); int ret; - ret = parent->ops->has_signaled(pt); + ret = parent->ops->has_signaled(fence); if (ret < 0) fence->status = ret; return ret; @@ -460,46 +342,32 @@ static bool android_fence_signaled(struct fence *fence) static bool android_fence_enable_signaling(struct fence *fence) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); if (android_fence_signaled(fence)) return false; - list_add_tail(&pt->active_list, &parent->active_list_head); + list_add_tail(&fence->active_list, &parent->active_list_head); return true; } -static int android_fence_fill_driver_data(struct fence *fence, - void *data, int size) -{ - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); - - if (!parent->ops->fill_driver_data) - return 0; - return parent->ops->fill_driver_data(pt, data, size); -} - static void android_fence_value_str(struct fence *fence, char *str, int size) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); - if (!parent->ops->pt_value_str) { + if (!parent->ops->fence_value_str) { if (size) *str = 0; return; } - parent->ops->pt_value_str(pt, str, size); + 
parent->ops->fence_value_str(fence, str, size); } static void android_fence_timeline_value_str(struct fence *fence, char *str, int size) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); if (!parent->ops->timeline_value_str) { if (size) @@ -516,65 +384,57 @@ static const struct fence_ops android_fence_ops = { .signaled = android_fence_signaled, .wait = fence_default_wait, .release = android_fence_release, - .fill_driver_data = android_fence_fill_driver_data, .fence_value_str = android_fence_value_str, .timeline_value_str = android_fence_timeline_value_str, }; -static void sync_fence_free(struct kref *kref) +static void sync_file_free(struct kref *kref) { - struct sync_fence *fence = container_of(kref, struct sync_fence, kref); + struct sync_file *sync_file = container_of(kref, struct sync_file, + kref); int i; - for (i = 0; i < fence->num_fences; ++i) { - fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb); - fence_put(fence->cbs[i].sync_pt); + for (i = 0; i < sync_file->num_fences; ++i) { + fence_remove_callback(sync_file->cbs[i].fence, + &sync_file->cbs[i].cb); + fence_put(sync_file->cbs[i].fence); } - kfree(fence); + kfree(sync_file); } -static int sync_fence_release(struct inode *inode, struct file *file) +static int sync_file_release(struct inode *inode, struct file *file) { - struct sync_fence *fence = file->private_data; + struct sync_file *sync_file = file->private_data; - sync_fence_debug_remove(fence); + sync_file_debug_remove(sync_file); - kref_put(&fence->kref, sync_fence_free); + kref_put(&sync_file->kref, sync_file_free); return 0; } -static unsigned int sync_fence_poll(struct file *file, poll_table *wait) +static unsigned int sync_file_poll(struct file *file, poll_table *wait) { - struct sync_fence *fence = file->private_data; + struct sync_file *sync_file = file->private_data; int status; - poll_wait(file, &fence->wq, wait); + poll_wait(file, &sync_file->wq, wait); - status = atomic_read(&fence->status); + status = atomic_read(&sync_file->status); if (!status) return POLLIN; - else if (status < 0) + if (status < 0) return POLLERR; return 0; } -static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg) -{ - __s32 value; - - if (copy_from_user(&value, (void __user *)arg, sizeof(value))) - return -EFAULT; - - return sync_fence_wait(fence, value); -} - -static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) +static long sync_file_ioctl_merge(struct sync_file *sync_file, + unsigned long arg) { int fd = get_unused_fd_flags(O_CLOEXEC); int err; - struct sync_fence *fence2, *fence3; + struct sync_file *fence2, *fence3; struct sync_merge_data data; if (fd < 0) @@ -585,14 +445,14 @@ static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) goto err_put_fd; } - fence2 = sync_fence_fdget(data.fd2); + fence2 = sync_file_fdget(data.fd2); if (!fence2) { err = -ENOENT; goto err_put_fd; } data.name[sizeof(data.name) - 1] = '\0'; - fence3 = sync_fence_merge(data.name, fence, fence2); + fence3 = sync_file_merge(data.name, sync_file, fence2); if (!fence3) { err = -ENOMEM; goto err_put_fence2; @@ -604,40 +464,28 @@ static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) goto err_put_fence3; } - sync_fence_install(fence3, fd); - sync_fence_put(fence2); + sync_file_install(fence3, fd); + sync_file_put(fence2); return 0; err_put_fence3: - sync_fence_put(fence3); + 
sync_file_put(fence3); err_put_fence2: - sync_fence_put(fence2); + sync_file_put(fence2); err_put_fd: put_unused_fd(fd); return err; } -static int sync_fill_pt_info(struct fence *fence, void *data, int size) +static int sync_fill_fence_info(struct fence *fence, void *data, int size) { - struct sync_pt_info *info = data; - int ret; + struct sync_fence_info *info = data; - if (size < sizeof(struct sync_pt_info)) + if (size < sizeof(*info)) return -ENOMEM; - info->len = sizeof(struct sync_pt_info); - - if (fence->ops->fill_driver_data) { - ret = fence->ops->fill_driver_data(fence, info->driver_data, - size - sizeof(*info)); - if (ret < 0) - return ret; - - info->len += ret; - } - strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), sizeof(info->obj_name)); strlcpy(info->driver_name, fence->ops->get_driver_name(fence), @@ -648,13 +496,13 @@ static int sync_fill_pt_info(struct fence *fence, void *data, int size) info->status = 0; info->timestamp_ns = ktime_to_ns(fence->timestamp); - return info->len; + return sizeof(*info); } -static long sync_fence_ioctl_fence_info(struct sync_fence *fence, +static long sync_file_ioctl_fence_info(struct sync_file *sync_file, unsigned long arg) { - struct sync_fence_info_data *data; + struct sync_file_info *info; __u32 size; __u32 len = 0; int ret, i; @@ -662,27 +510,27 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence, if (copy_from_user(&size, (void __user *)arg, sizeof(size))) return -EFAULT; - if (size < sizeof(struct sync_fence_info_data)) + if (size < sizeof(struct sync_file_info)) return -EINVAL; if (size > 4096) size = 4096; - data = kzalloc(size, GFP_KERNEL); - if (!data) + info = kzalloc(size, GFP_KERNEL); + if (!info) return -ENOMEM; - strlcpy(data->name, fence->name, sizeof(data->name)); - data->status = atomic_read(&fence->status); - if (data->status >= 0) - data->status = !data->status; + strlcpy(info->name, sync_file->name, sizeof(info->name)); + info->status = atomic_read(&sync_file->status); + if (info->status >= 0) + info->status = !info->status; - len = sizeof(struct sync_fence_info_data); + len = sizeof(struct sync_file_info); - for (i = 0; i < fence->num_fences; ++i) { - struct fence *pt = fence->cbs[i].sync_pt; + for (i = 0; i < sync_file->num_fences; ++i) { + struct fence *fence = sync_file->cbs[i].fence; - ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); + ret = sync_fill_fence_info(fence, (u8 *)info + len, size - len); if (ret < 0) goto out; @@ -690,43 +538,40 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence, len += ret; } - data->len = len; + info->len = len; - if (copy_to_user((void __user *)arg, data, len)) + if (copy_to_user((void __user *)arg, info, len)) ret = -EFAULT; else ret = 0; out: - kfree(data); + kfree(info); return ret; } -static long sync_fence_ioctl(struct file *file, unsigned int cmd, +static long sync_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct sync_fence *fence = file->private_data; + struct sync_file *sync_file = file->private_data; switch (cmd) { - case SYNC_IOC_WAIT: - return sync_fence_ioctl_wait(fence, arg); - case SYNC_IOC_MERGE: - return sync_fence_ioctl_merge(fence, arg); + return sync_file_ioctl_merge(sync_file, arg); case SYNC_IOC_FENCE_INFO: - return sync_fence_ioctl_fence_info(fence, arg); + return sync_file_ioctl_fence_info(sync_file, arg); default: return -ENOTTY; } } -static const struct file_operations sync_fence_fops = { - .release = sync_fence_release, - .poll = sync_fence_poll, - .unlocked_ioctl = 
sync_fence_ioctl, - .compat_ioctl = sync_fence_ioctl, +static const struct file_operations sync_file_fops = { + .release = sync_file_release, + .poll = sync_file_poll, + .unlocked_ioctl = sync_file_ioctl, + .compat_ioctl = sync_file_ioctl, }; diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h index afa075227..d2a173433 100644 --- a/drivers/staging/android/sync.h +++ b/drivers/staging/android/sync.h @@ -18,63 +18,35 @@ #include <linux/ktime.h> #include <linux/list.h> #include <linux/spinlock.h> -#include <linux/wait.h> #include <linux/fence.h> #include "uapi/sync.h" struct sync_timeline; -struct sync_pt; -struct sync_fence; +struct sync_file; /** * struct sync_timeline_ops - sync object implementation ops * @driver_name: name of the implementation - * @dup: duplicate a sync_pt * @has_signaled: returns: * 1 if pt has signaled * 0 if pt has not signaled * <0 on error - * @compare: returns: - * 1 if b will signal before a - * 0 if a and b will signal at the same time - * -1 if a will signal before b - * @free_pt: called before sync_pt is freed - * @release_obj: called before sync_timeline is freed - * @fill_driver_data: write implementation specific driver data to data. - * should return an error if there is not enough room - * as specified by size. This information is returned - * to userspace by SYNC_IOC_FENCE_INFO. * @timeline_value_str: fill str with the value of the sync_timeline's counter - * @pt_value_str: fill str with the value of the sync_pt + * @fence_value_str: fill str with the value of the fence */ struct sync_timeline_ops { const char *driver_name; /* required */ - struct sync_pt * (*dup)(struct sync_pt *pt); - - /* required */ - int (*has_signaled)(struct sync_pt *pt); - - /* required */ - int (*compare)(struct sync_pt *a, struct sync_pt *b); - - /* optional */ - void (*free_pt)(struct sync_pt *sync_pt); - - /* optional */ - void (*release_obj)(struct sync_timeline *sync_timeline); - - /* optional */ - int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size); + int (*has_signaled)(struct fence *fence); /* optional */ void (*timeline_value_str)(struct sync_timeline *timeline, char *str, int size); /* optional */ - void (*pt_value_str)(struct sync_pt *pt, char *str, int size); + void (*fence_value_str)(struct fence *fence, char *str, int size); }; /** @@ -85,7 +57,7 @@ struct sync_timeline_ops { * @destroyed: set when sync_timeline is destroyed * @child_list_head: list of children sync_pts for this sync_timeline * @child_list_lock: lock protecting @child_list_head, destroyed, and - * sync_pt.status + * fence.status * @active_list_head: list of active (unsignaled/errored) sync_pts * @sync_timeline_list: membership in global sync_timeline_list */ @@ -108,86 +80,44 @@ struct sync_timeline { #endif }; -/** - * struct sync_pt - sync point - * @fence: base fence class - * @child_list: membership in sync_timeline.child_list_head - * @active_list: membership in sync_timeline.active_list_head - * @signaled_list: membership in temporary signaled_list on stack - * @fence: sync_fence to which the sync_pt belongs - * @pt_list: membership in sync_fence.pt_list_head - * @status: 1: signaled, 0:active, <0: error - * @timestamp: time which sync_pt status transitioned from active to - * signaled or error. 
- */ -struct sync_pt { - struct fence base; - - struct list_head child_list; - struct list_head active_list; -}; - -static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt) +static inline struct sync_timeline *fence_parent(struct fence *fence) { - return container_of(pt->base.lock, struct sync_timeline, + return container_of(fence->lock, struct sync_timeline, child_list_lock); } -struct sync_fence_cb { +struct sync_file_cb { struct fence_cb cb; - struct fence *sync_pt; - struct sync_fence *fence; + struct fence *fence; + struct sync_file *sync_file; }; /** - * struct sync_fence - sync fence + * struct sync_file - sync file to export to the userspace * @file: file representing this fence * @kref: reference count on fence. - * @name: name of sync_fence. Useful for debugging - * @pt_list_head: list of sync_pts in the fence. immutable once fence - * is created - * @status: 0: signaled, >0:active, <0: error - * + * @name: name of sync_file. Useful for debugging + * @sync_file_list: membership in global file list + * @num_fences number of sync_pts in the fence * @wq: wait queue for fence signaling - * @sync_fence_list: membership in global fence list + * @status: 0: signaled, >0:active, <0: error + * @cbs: sync_pts callback information */ -struct sync_fence { +struct sync_file { struct file *file; struct kref kref; char name[32]; #ifdef CONFIG_DEBUG_FS - struct list_head sync_fence_list; + struct list_head sync_file_list; #endif int num_fences; wait_queue_head_t wq; atomic_t status; - struct sync_fence_cb cbs[]; -}; - -struct sync_fence_waiter; -typedef void (*sync_callback_t)(struct sync_fence *fence, - struct sync_fence_waiter *waiter); - -/** - * struct sync_fence_waiter - metadata for asynchronous waiter on a fence - * @waiter_list: membership in sync_fence.waiter_list_head - * @callback: function pointer to call when fence signals - * @callback_data: pointer to pass to @callback - */ -struct sync_fence_waiter { - wait_queue_t work; - sync_callback_t callback; + struct sync_file_cb cbs[]; }; -static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, - sync_callback_t callback) -{ - INIT_LIST_HEAD(&waiter->work.task_list); - waiter->callback = callback; -} - /* * API for sync_timeline implementers */ @@ -200,7 +130,8 @@ static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, * * Creates a new sync_timeline which will use the implementation specified by * @ops. @size bytes will be allocated allowing for implementation specific - * data to be kept after the generic sync_timeline struct. + * data to be kept after the generic sync_timeline struct. Returns the + * sync_timeline object or NULL in case of error. */ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, int size, const char *name); @@ -211,7 +142,7 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, * * A sync implementation should call this when the @obj is going away * (i.e. module unload.) @obj won't actually be freed until all its children - * sync_pts are freed. + * fences are freed. */ void sync_timeline_destroy(struct sync_timeline *obj); @@ -219,148 +150,92 @@ void sync_timeline_destroy(struct sync_timeline *obj); * sync_timeline_signal() - signal a status change on a sync_timeline * @obj: sync_timeline to signal * - * A sync implementation should call this any time one of it's sync_pts + * A sync implementation should call this any time one of it's fences * has signaled or has an error condition. 
*/
 void sync_timeline_signal(struct sync_timeline *obj);
 /**
 * sync_pt_create() - creates a sync pt
- * @parent: sync_pt's parent sync_timeline
+ * @parent: fence's parent sync_timeline
 * @size: size to allocate for this pt
 *
- * Creates a new sync_pt as a child of @parent. @size bytes will be
+ * Creates a new fence as a child of @parent. @size bytes will be
 * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct.
- */
-struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
-
-/**
- * sync_pt_free() - frees a sync pt
- * @pt: sync_pt to free
- *
- * This should only be called on sync_pts which have been created but
- * not added to a fence.
+ * the generic sync_timeline struct. Returns the fence object or
+ * NULL in case of error.
 */
-void sync_pt_free(struct sync_pt *pt);
+struct fence *sync_pt_create(struct sync_timeline *parent, int size);
 /**
 * sync_fence_create() - creates a sync fence
 * @name: name of fence to create
- * @pt: sync_pt to add to the fence
- *
- * Creates a fence containg @pt. Once this is called, the fence takes
- * ownership of @pt.
- */
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
-
-/**
- * sync_fence_create_dma() - creates a sync fence from dma-fence
- * @name: name of fence to create
- * @pt: dma-fence to add to the fence
+ * @fence: fence to add to the sync_file
 *
- * Creates a fence containg @pt. Once this is called, the fence takes
- * ownership of @pt.
+ * Creates a sync_file containing @fence. Once this is called, the sync_file
+ * takes ownership of @fence.
 */
-struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt);
+struct sync_file *sync_file_create(const char *name, struct fence *fence);
 /*
- * API for sync_fence consumers
+ * API for sync_file consumers
 */
 /**
- * sync_fence_merge() - merge two fences
+ * sync_file_merge() - merge two sync_files
 * @name: name of new fence
- * @a: fence a
- * @b: fence b
+ * @a: sync_file a
+ * @b: sync_file b
 *
- * Creates a new fence which contains copies of all the sync_pts in both
- * @a and @b. @a and @b remain valid, independent fences.
+ * Creates a new sync_file which contains copies of all the fences in both
+ * @a and @b. @a and @b remain valid, independent sync_files. Returns the
+ * new merged sync_file or NULL in case of error.
 */
-struct sync_fence *sync_fence_merge(const char *name,
- struct sync_fence *a, struct sync_fence *b);
+struct sync_file *sync_file_merge(const char *name,
+ struct sync_file *a, struct sync_file *b);
 /**
- * sync_fence_fdget() - get a fence from an fd
+ * sync_file_fdget() - get a sync_file from an fd
 * @fd: fd referencing a fence
 *
- * Ensures @fd references a valid fence, increments the refcount of the backing
- * file, and returns the fence.
+ * Ensures @fd references a valid sync_file, increments the refcount of the
+ * backing file. Returns the sync_file or NULL in case of error.
 */
-struct sync_fence *sync_fence_fdget(int fd);
+struct sync_file *sync_file_fdget(int fd);
 /**
- * sync_fence_put() - puts a reference of a sync fence
- * @fence: fence to put
+ * sync_file_put() - puts a reference of a sync_file
+ * @sync_file: sync_file to put
 *
- * Puts a reference on @fence. If this is the last reference, the fence and
- * all it's sync_pts will be freed
+ * Puts a reference on @sync_file. 
If this is the last reference, the
+ * sync_file and all its fences will be freed
 */
-void sync_fence_put(struct sync_fence *fence);
+void sync_file_put(struct sync_file *sync_file);
 /**
- * sync_fence_install() - installs a fence into a file descriptor
- * @fence: fence to install
+ * sync_file_install() - installs a sync_file into a file descriptor
+ * @sync_file: sync_file to install
 * @fd: file descriptor in which to install the fence
 *
- * Installs @fence into @fd. @fd's should be acquired through
+ * Installs @sync_file into @fd. @fd's should be acquired through
 * get_unused_fd_flags(O_CLOEXEC).
 */
-void sync_fence_install(struct sync_fence *fence, int fd);
-
-/**
- * sync_fence_wait_async() - registers and async wait on the fence
- * @fence: fence to wait on
- * @waiter: waiter callback struck
- *
- * Returns 1 if @fence has already signaled.
- *
- * Registers a callback to be called when @fence signals or has an error.
- * @waiter should be initialized with sync_fence_waiter_init().
- */
-int sync_fence_wait_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * sync_fence_cancel_async() - cancels an async wait
- * @fence: fence to wait on
- * @waiter: waiter callback struck
- *
- * returns 0 if waiter was removed from fence's async waiter list.
- * returns -ENOENT if waiter was not found on fence's async waiter list.
- *
- * Cancels a previously registered async wait. Will fail gracefully if
- * @waiter was never registered or if @fence has already signaled @waiter.
- */
-int sync_fence_cancel_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * sync_fence_wait() - wait on fence
- * @fence: fence to wait on
- * @tiemout: timeout in ms
- *
- * Wait for @fence to be signaled or have an error. 
Waits indefinitely - * if @timeout < 0 - */ -int sync_fence_wait(struct sync_fence *fence, long timeout); +void sync_file_install(struct sync_file *sync_file, int fd); #ifdef CONFIG_DEBUG_FS void sync_timeline_debug_add(struct sync_timeline *obj); void sync_timeline_debug_remove(struct sync_timeline *obj); -void sync_fence_debug_add(struct sync_fence *fence); -void sync_fence_debug_remove(struct sync_fence *fence); +void sync_file_debug_add(struct sync_file *fence); +void sync_file_debug_remove(struct sync_file *fence); void sync_dump(void); #else # define sync_timeline_debug_add(obj) # define sync_timeline_debug_remove(obj) -# define sync_fence_debug_add(fence) -# define sync_fence_debug_remove(fence) +# define sync_file_debug_add(fence) +# define sync_file_debug_remove(fence) # define sync_dump() #endif -int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode, - int wake_flags, void *key); #endif /* _LINUX_SYNC_H */ diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c index f45d13cdd..5a7ec58fb 100644 --- a/drivers/staging/android/sync_debug.c +++ b/drivers/staging/android/sync_debug.c @@ -26,14 +26,16 @@ #include <linux/uaccess.h> #include <linux/anon_inodes.h> #include <linux/time64.h> -#include "sync.h" +#include "sw_sync.h" #ifdef CONFIG_DEBUG_FS +static struct dentry *dbgfs; + static LIST_HEAD(sync_timeline_list_head); static DEFINE_SPINLOCK(sync_timeline_list_lock); -static LIST_HEAD(sync_fence_list_head); -static DEFINE_SPINLOCK(sync_fence_list_lock); +static LIST_HEAD(sync_file_list_head); +static DEFINE_SPINLOCK(sync_file_list_lock); void sync_timeline_debug_add(struct sync_timeline *obj) { @@ -53,22 +55,22 @@ void sync_timeline_debug_remove(struct sync_timeline *obj) spin_unlock_irqrestore(&sync_timeline_list_lock, flags); } -void sync_fence_debug_add(struct sync_fence *fence) +void sync_file_debug_add(struct sync_file *sync_file) { unsigned long flags; - spin_lock_irqsave(&sync_fence_list_lock, flags); - list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); - spin_unlock_irqrestore(&sync_fence_list_lock, flags); + spin_lock_irqsave(&sync_file_list_lock, flags); + list_add_tail(&sync_file->sync_file_list, &sync_file_list_head); + spin_unlock_irqrestore(&sync_file_list_lock, flags); } -void sync_fence_debug_remove(struct sync_fence *fence) +void sync_file_debug_remove(struct sync_file *sync_file) { unsigned long flags; - spin_lock_irqsave(&sync_fence_list_lock, flags); - list_del(&fence->sync_fence_list); - spin_unlock_irqrestore(&sync_fence_list_lock, flags); + spin_lock_irqsave(&sync_file_list_lock, flags); + list_del(&sync_file->sync_file_list); + spin_unlock_irqrestore(&sync_file_list_lock, flags); } static const char *sync_status_str(int status) @@ -82,39 +84,40 @@ static const char *sync_status_str(int status) return "error"; } -static void sync_print_pt(struct seq_file *s, struct fence *pt, bool fence) +static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show) { int status = 1; + struct sync_timeline *parent = fence_parent(fence); - if (fence_is_signaled_locked(pt)) - status = pt->status; + if (fence_is_signaled_locked(fence)) + status = fence->status; - seq_printf(s, " %s%spt %s", - fence && pt->ops->get_timeline_name ? - pt->ops->get_timeline_name(pt) : "", - fence ? "_" : "", + seq_printf(s, " %s%sfence %s", + show ? parent->name : "", + show ? 
"_" : "", sync_status_str(status)); if (status <= 0) { struct timespec64 ts64 = - ktime_to_timespec64(pt->timestamp); + ktime_to_timespec64(fence->timestamp); seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec); } - if ((!fence || pt->ops->timeline_value_str) && - pt->ops->fence_value_str) { + if ((!fence || fence->ops->timeline_value_str) && + fence->ops->fence_value_str) { char value[64]; bool success; - pt->ops->fence_value_str(pt, value, sizeof(value)); + fence->ops->fence_value_str(fence, value, sizeof(value)); success = strlen(value); if (success) seq_printf(s, ": %s", value); if (success && fence) { - pt->ops->timeline_value_str(pt, value, sizeof(value)); + fence->ops->timeline_value_str(fence, value, + sizeof(value)); if (strlen(value)) seq_printf(s, " / %s", value); @@ -142,38 +145,23 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) spin_lock_irqsave(&obj->child_list_lock, flags); list_for_each(pos, &obj->child_list_head) { - struct sync_pt *pt = - container_of(pos, struct sync_pt, child_list); - sync_print_pt(s, &pt->base, false); + struct fence *fence = + container_of(pos, struct fence, child_list); + sync_print_fence(s, fence, false); } spin_unlock_irqrestore(&obj->child_list_lock, flags); } -static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) +static void sync_print_sync_file(struct seq_file *s, + struct sync_file *sync_file) { - wait_queue_t *pos; - unsigned long flags; int i; - seq_printf(s, "[%p] %s: %s\n", fence, fence->name, - sync_status_str(atomic_read(&fence->status))); - - for (i = 0; i < fence->num_fences; ++i) { - sync_print_pt(s, fence->cbs[i].sync_pt, true); - } - - spin_lock_irqsave(&fence->wq.lock, flags); - list_for_each_entry(pos, &fence->wq.task_list, task_list) { - struct sync_fence_waiter *waiter; - - if (pos->func != &sync_fence_wake_up_wq) - continue; + seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name, + sync_status_str(atomic_read(&sync_file->status))); - waiter = container_of(pos, struct sync_fence_waiter, work); - - seq_printf(s, "waiter %pF\n", waiter->callback); - } - spin_unlock_irqrestore(&fence->wq.lock, flags); + for (i = 0; i < sync_file->num_fences; ++i) + sync_print_fence(s, sync_file->cbs[i].fence, true); } static int sync_debugfs_show(struct seq_file *s, void *unused) @@ -196,33 +184,152 @@ static int sync_debugfs_show(struct seq_file *s, void *unused) seq_puts(s, "fences:\n--------------\n"); - spin_lock_irqsave(&sync_fence_list_lock, flags); - list_for_each(pos, &sync_fence_list_head) { - struct sync_fence *fence = - container_of(pos, struct sync_fence, sync_fence_list); + spin_lock_irqsave(&sync_file_list_lock, flags); + list_for_each(pos, &sync_file_list_head) { + struct sync_file *sync_file = + container_of(pos, struct sync_file, sync_file_list); - sync_print_fence(s, fence); + sync_print_sync_file(s, sync_file); seq_puts(s, "\n"); } - spin_unlock_irqrestore(&sync_fence_list_lock, flags); + spin_unlock_irqrestore(&sync_file_list_lock, flags); return 0; } -static int sync_debugfs_open(struct inode *inode, struct file *file) +static int sync_info_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, sync_debugfs_show, inode->i_private); } -static const struct file_operations sync_debugfs_fops = { - .open = sync_debugfs_open, +static const struct file_operations sync_info_debugfs_fops = { + .open = sync_info_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; +/* + * *WARNING* + * + * improper use of this can result 
in deadlocking kernel drivers from userspace. + */ + +/* opening sw_sync create a new sync obj */ +static int sw_sync_debugfs_open(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj; + char task_comm[TASK_COMM_LEN]; + + get_task_comm(task_comm, current); + + obj = sw_sync_timeline_create(task_comm); + if (!obj) + return -ENOMEM; + + file->private_data = obj; + + return 0; +} + +static int sw_sync_debugfs_release(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj = file->private_data; + + sync_timeline_destroy(&obj->obj); + return 0; +} + +static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, + unsigned long arg) +{ + int fd = get_unused_fd_flags(O_CLOEXEC); + int err; + struct fence *fence; + struct sync_file *sync_file; + struct sw_sync_create_fence_data data; + + if (fd < 0) + return fd; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { + err = -EFAULT; + goto err; + } + + fence = sw_sync_pt_create(obj, data.value); + if (!fence) { + err = -ENOMEM; + goto err; + } + + data.name[sizeof(data.name) - 1] = '\0'; + sync_file = sync_file_create(data.name, fence); + if (!sync_file) { + fence_put(fence); + err = -ENOMEM; + goto err; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + sync_file_put(sync_file); + err = -EFAULT; + goto err; + } + + sync_file_install(sync_file, fd); + + return 0; + +err: + put_unused_fd(fd); + return err; +} + +static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg) +{ + u32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + sw_sync_timeline_inc(obj, value); + + return 0; +} + +static long sw_sync_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct sw_sync_timeline *obj = file->private_data; + + switch (cmd) { + case SW_SYNC_IOC_CREATE_FENCE: + return sw_sync_ioctl_create_fence(obj, arg); + + case SW_SYNC_IOC_INC: + return sw_sync_ioctl_inc(obj, arg); + + default: + return -ENOTTY; + } +} + +static const struct file_operations sw_sync_debugfs_fops = { + .open = sw_sync_debugfs_open, + .release = sw_sync_debugfs_release, + .unlocked_ioctl = sw_sync_ioctl, + .compat_ioctl = sw_sync_ioctl, +}; + static __init int sync_debugfs_init(void) { - debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); + dbgfs = debugfs_create_dir("sync", NULL); + + debugfs_create_file("info", 0444, dbgfs, NULL, &sync_info_debugfs_fops); + debugfs_create_file("sw_sync", 0644, dbgfs, NULL, + &sw_sync_debugfs_fops); + return 0; } late_initcall(sync_debugfs_init); diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c index bcd9924d4..914fd1005 100644 --- a/drivers/staging/android/timed_gpio.c +++ b/drivers/staging/android/timed_gpio.c @@ -92,9 +92,8 @@ static int timed_gpio_probe(struct platform_device *pdev) if (!pdata) return -EBUSY; - gpio_data = devm_kzalloc(&pdev->dev, - sizeof(*gpio_data) * pdata->num_gpios, - GFP_KERNEL); + gpio_data = devm_kcalloc(&pdev->dev, pdata->num_gpios, + sizeof(*gpio_data), GFP_KERNEL); if (!gpio_data) return -ENOMEM; diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h index 77edb977a..a0f80f416 100644 --- a/drivers/staging/android/trace/sync.h +++ b/drivers/staging/android/trace/sync.h @@ -32,50 +32,6 @@ TRACE_EVENT(sync_timeline, TP_printk("name=%s value=%s", __get_str(name), __entry->value) ); -TRACE_EVENT(sync_wait, - TP_PROTO(struct sync_fence *fence, int begin), - - 
TP_ARGS(fence, begin), - - TP_STRUCT__entry( - __string(name, fence->name) - __field(s32, status) - __field(u32, begin) - ), - - TP_fast_assign( - __assign_str(name, fence->name); - __entry->status = atomic_read(&fence->status); - __entry->begin = begin; - ), - - TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end", - __get_str(name), __entry->status) -); - -TRACE_EVENT(sync_pt, - TP_PROTO(struct fence *pt), - - TP_ARGS(pt), - - TP_STRUCT__entry( - __string(timeline, pt->ops->get_timeline_name(pt)) - __array(char, value, 32) - ), - - TP_fast_assign( - __assign_str(timeline, pt->ops->get_timeline_name(pt)); - if (pt->ops->fence_value_str) { - pt->ops->fence_value_str(pt, __entry->value, - sizeof(__entry->value)); - } else { - __entry->value[0] = '\0'; - } - ), - - TP_printk("name=%s value=%s", __get_str(timeline), __entry->value) -); - #endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */ /* This part must be outside protection */ diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h index ba4743c71..13df42d20 100644 --- a/drivers/staging/android/uapi/ashmem.h +++ b/drivers/staging/android/uapi/ashmem.h @@ -13,6 +13,7 @@ #define _UAPI_LINUX_ASHMEM_H #include <linux/ioctl.h> +#include <linux/types.h> #define ASHMEM_NAME_LEN 256 diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h index e964c751f..a0cf357e5 100644 --- a/drivers/staging/android/uapi/sync.h +++ b/drivers/staging/android/uapi/sync.h @@ -27,51 +27,39 @@ struct sync_merge_data { }; /** - * struct sync_pt_info - detailed sync_pt information - * @len: length of sync_pt_info including any driver_data + * struct sync_fence_info - detailed fence information * @obj_name: name of parent sync_timeline * @driver_name: name of driver implementing the parent - * @status: status of the sync_pt 0:active 1:signaled <0:error + * @status: status of the fence 0:active 1:signaled <0:error * @timestamp_ns: timestamp of status change in nanoseconds - * @driver_data: any driver dependent data */ -struct sync_pt_info { - __u32 len; +struct sync_fence_info { char obj_name[32]; char driver_name[32]; __s32 status; __u64 timestamp_ns; - - __u8 driver_data[0]; }; /** - * struct sync_fence_info_data - data returned from fence info ioctl + * struct sync_file_info - data returned from fence info ioctl * @len: ioctl caller writes the size of the buffer its passing in. - * ioctl returns length of sync_fence_data returned to userspace - * including pt_info. + * ioctl returns length of sync_file_info returned to + * userspace including pt_info. * @name: name of fence * @status: status of fence. 1: signaled 0:active <0:error - * @pt_info: a sync_pt_info struct for every sync_pt in the fence + * @sync_fence_info: array of sync_fence_info for every fence in the sync_file */ -struct sync_fence_info_data { +struct sync_file_info { __u32 len; char name[32]; __s32 status; - __u8 pt_info[0]; + __u8 sync_fence_info[0]; }; #define SYNC_IOC_MAGIC '>' /** - * DOC: SYNC_IOC_WAIT - wait for a fence to signal - * - * pass timeout in milliseconds. Waits indefinitely timeout < 0. - */ -#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32) - -/** * DOC: SYNC_IOC_MERGE - merge two fences * * Takes a struct sync_merge_data. Creates a new fence containing copies of @@ -83,15 +71,14 @@ struct sync_fence_info_data { /** * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence * - * Takes a struct sync_fence_info_data with extra space allocated for pt_info. 
+ * Takes a struct sync_file_info with extra space allocated for pt_info.
 * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
+ * updated to reflect the total size of the sync_file_info including
 * pt_info.
 *
 * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
 * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
 */
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
- struct sync_fence_info_data)
+#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2, struct sync_file_info)
 #endif /* _UAPI_LINUX_SYNC_H */
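For reference, here is a minimal userspace sketch of the debugfs-based sw_sync interface added in sync_debug.c above: it opens the debug node (which creates a private timeline), creates a fence that signals at timeline value 1, advances the timeline, and polls the resulting fence fd. This is illustrative only and not part of the patch; it assumes debugfs is mounted at /sys/kernel/debug and that the sw_sync_create_fence_data layout and SW_SYNC_IOC_* definitions reproduced below match the kernel's sw_sync uapi header.

/*
 * Illustrative userspace sketch (not part of the patch above): exercises the
 * sw_sync debugfs interface from sync_debug.c. Assumes debugfs is mounted at
 * /sys/kernel/debug and that the SW_SYNC_IOC_* definitions below match the
 * kernel's sw_sync uapi header.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

/* Assumed to mirror drivers/staging/android/uapi/sw_sync.h */
struct sw_sync_create_fence_data {
	__u32	value;
	char	name[32];
	__s32	fence;	/* fd of the new fence, filled in by the kernel */
};

#define SW_SYNC_IOC_MAGIC		'W'
#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0, \
					      struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)

int main(void)
{
	struct sw_sync_create_fence_data data;
	struct pollfd pfd;
	__u32 inc = 1;
	int tl;

	/* Opening the debugfs node creates a private sw_sync timeline. */
	tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
	if (tl < 0)
		return 1;

	/* Create a fence that signals once the timeline reaches value 1. */
	memset(&data, 0, sizeof(data));
	data.value = 1;
	strncpy(data.name, "example", sizeof(data.name) - 1);
	if (ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;

	/* Advance the timeline by 1; the fence created above should signal. */
	ioctl(tl, SW_SYNC_IOC_INC, &inc);

	/* sync_file_poll() reports POLLIN once every fence has signaled. */
	pfd.fd = data.fence;
	pfd.events = POLLIN;
	poll(&pfd, 1, 0);
	printf("fence signaled: %s\n", (pfd.revents & POLLIN) ? "yes" : "no");

	close(data.fence);
	close(tl);	/* releasing the node destroys the timeline */
	return 0;
}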