Diffstat (limited to 'drivers/dma-buf')
-rw-r--r-- | drivers/dma-buf/Kconfig       |  15
-rw-r--r-- | drivers/dma-buf/Makefile      |   2
-rw-r--r-- | drivers/dma-buf/dma-buf.c     |  59
-rw-r--r-- | drivers/dma-buf/fence-array.c | 144
-rw-r--r-- | drivers/dma-buf/fence.c       |   8
-rw-r--r-- | drivers/dma-buf/sync_file.c   |   2
6 files changed, 187 insertions, 43 deletions
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 9824bc4ad..25bcfa0b4 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -1,11 +1,20 @@
 menu "DMABUF options"
 
 config SYNC_FILE
-	bool "sync_file support for fences"
+	bool "Explicit Synchronization Framework"
 	default n
 	select ANON_INODES
 	select DMA_SHARED_BUFFER
 	---help---
-	  This option enables the fence framework synchronization to export
-	  sync_files to userspace that can represent one or more fences.
+	  The Sync File Framework adds explicit syncronization via
+	  userspace. It enables send/receive 'struct fence' objects to/from
+	  userspace via Sync File fds for synchronization between drivers via
+	  userspace components. It has been ported from Android.
+
+	  The first and main user for this is graphics in which a fence is
+	  associated with a buffer. When a job is submitted to the GPU a fence
+	  is attached to the buffer and is transferred via userspace, using Sync
+	  Files fds, to the DRM driver for example. More details at
+	  Documentation/sync_file.txt.
+
 endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 4a424eca7..f353db213 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,2 +1,2 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
+obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
 obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 6355ab38d..ddaee60ae 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -334,6 +334,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	struct reservation_object *resv = exp_info->resv;
 	struct file *file;
 	size_t alloc_size = sizeof(struct dma_buf);
+	int ret;
 
 	if (!exp_info->resv)
 		alloc_size += sizeof(struct reservation_object);
@@ -357,8 +358,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 
 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 	if (!dmabuf) {
-		module_put(exp_info->owner);
-		return ERR_PTR(-ENOMEM);
+		ret = -ENOMEM;
+		goto err_module;
 	}
 
 	dmabuf->priv = exp_info->priv;
@@ -379,8 +380,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
					exp_info->flags);
 	if (IS_ERR(file)) {
-		kfree(dmabuf);
-		return ERR_CAST(file);
+		ret = PTR_ERR(file);
+		goto err_dmabuf;
 	}
 
 	file->f_mode |= FMODE_LSEEK;
@@ -394,6 +395,12 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	mutex_unlock(&db_list.lock);
 
 	return dmabuf;
+
+err_dmabuf:
+	kfree(dmabuf);
+err_module:
+	module_put(exp_info->owner);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(dma_buf_export);
 
@@ -824,7 +831,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 EXPORT_SYMBOL_GPL(dma_buf_vunmap);
 
 #ifdef CONFIG_DEBUG_FS
-static int dma_buf_describe(struct seq_file *s)
+static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
 	int ret;
 	struct dma_buf *buf_obj;
@@ -879,17 +886,9 @@ static int dma_buf_describe(struct seq_file *s)
 	return 0;
 }
 
-static int dma_buf_show(struct seq_file *s, void *unused)
-{
-	void (*func)(struct seq_file *) = s->private;
-
-	func(s);
-	return 0;
-}
-
 static int dma_buf_debug_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, dma_buf_show, inode->i_private);
+	return single_open(file, dma_buf_debug_show, NULL);
 }
 
 static const struct file_operations dma_buf_debug_fops = {
@@ -903,20 +902,23 @@ static struct dentry *dma_buf_debugfs_dir;
 
 static int dma_buf_init_debugfs(void)
 {
+	struct dentry *d;
 	int err = 0;
 
-	dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
+	d = debugfs_create_dir("dma_buf", NULL);
+	if (IS_ERR(d))
+		return PTR_ERR(d);
 
-	if (IS_ERR(dma_buf_debugfs_dir)) {
-		err = PTR_ERR(dma_buf_debugfs_dir);
-		dma_buf_debugfs_dir = NULL;
-		return err;
-	}
-
-	err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);
+	dma_buf_debugfs_dir = d;
 
-	if (err)
+	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+				NULL, &dma_buf_debug_fops);
+	if (IS_ERR(d)) {
 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
+		debugfs_remove_recursive(dma_buf_debugfs_dir);
+		dma_buf_debugfs_dir = NULL;
+		err = PTR_ERR(d);
+	}
 
 	return err;
 }
@@ -926,17 +928,6 @@ static void dma_buf_uninit_debugfs(void)
 	if (dma_buf_debugfs_dir)
 		debugfs_remove_recursive(dma_buf_debugfs_dir);
 }
-
-int dma_buf_debugfs_create_file(const char *name,
-				int (*write)(struct seq_file *))
-{
-	struct dentry *d;
-
-	d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
-			write, &dma_buf_debug_fops);
-
-	return PTR_ERR_OR_ZERO(d);
-}
 #else
 static inline int dma_buf_init_debugfs(void)
 {
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
new file mode 100644
index 000000000..a8731c853
--- /dev/null
+++ b/drivers/dma-buf/fence-array.c
@@ -0,0 +1,144 @@
+/*
+ * fence-array: aggregate fences to be waited together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ *	Gustavo Padovan <gustavo@padovan.org>
+ *	Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/fence-array.h>
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
+
+static const char *fence_array_get_driver_name(struct fence *fence)
+{
+	return "fence_array";
+}
+
+static const char *fence_array_get_timeline_name(struct fence *fence)
+{
+	return "unbound";
+}
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+{
+	struct fence_array_cb *array_cb =
+		container_of(cb, struct fence_array_cb, cb);
+	struct fence_array *array = array_cb->array;
+
+	if (atomic_dec_and_test(&array->num_pending))
+		fence_signal(&array->base);
+	fence_put(&array->base);
+}
+
+static bool fence_array_enable_signaling(struct fence *fence)
+{
+	struct fence_array *array = to_fence_array(fence);
+	struct fence_array_cb *cb = (void *)(&array[1]);
+	unsigned i;
+
+	for (i = 0; i < array->num_fences; ++i) {
+		cb[i].array = array;
+		/*
+		 * As we may report that the fence is signaled before all
+		 * callbacks are complete, we need to take an additional
+		 * reference count on the array so that we do not free it too
+		 * early. The core fence handling will only hold the reference
+		 * until we signal the array as complete (but that is now
+		 * insufficient).
+		 */
+		fence_get(&array->base);
+		if (fence_add_callback(array->fences[i], &cb[i].cb,
+				       fence_array_cb_func)) {
+			fence_put(&array->base);
+			if (atomic_dec_and_test(&array->num_pending))
+				return false;
+		}
+	}
+
+	return true;
+}
+
+static bool fence_array_signaled(struct fence *fence)
+{
+	struct fence_array *array = to_fence_array(fence);
+
+	return atomic_read(&array->num_pending) <= 0;
+}
+
+static void fence_array_release(struct fence *fence)
+{
+	struct fence_array *array = to_fence_array(fence);
+	unsigned i;
+
+	for (i = 0; i < array->num_fences; ++i)
+		fence_put(array->fences[i]);
+
+	kfree(array->fences);
+	fence_free(fence);
+}
+
+const struct fence_ops fence_array_ops = {
+	.get_driver_name = fence_array_get_driver_name,
+	.get_timeline_name = fence_array_get_timeline_name,
+	.enable_signaling = fence_array_enable_signaling,
+	.signaled = fence_array_signaled,
+	.wait = fence_default_wait,
+	.release = fence_array_release,
+};
+
+/**
+ * fence_array_create - Create a custom fence array
+ * @num_fences:		[in]	number of fences to add in the array
+ * @fences:		[in]	array containing the fences
+ * @context:		[in]	fence context to use
+ * @seqno:		[in]	sequence number to use
+ * @signal_on_any	[in]	signal on any fence in the array
+ *
+ * Allocate a fence_array object and initialize the base fence with fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocte the fences array with num_fences size
+ * and fill it with the fences it wants to add to the object. Ownership of this
+ * array is take and fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct fence_array *fence_array_create(int num_fences, struct fence **fences,
+				       u64 context, unsigned seqno,
+				       bool signal_on_any)
+{
+	struct fence_array *array;
+	size_t size = sizeof(*array);
+
+	/* Allocate the callback structures behind the array. */
+	size += num_fences * sizeof(struct fence_array_cb);
+	array = kzalloc(size, GFP_KERNEL);
+	if (!array)
+		return NULL;
+
+	spin_lock_init(&array->lock);
+	fence_init(&array->base, &fence_array_ops, &array->lock,
+		   context, seqno);
+
+	array->num_fences = num_fences;
+	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
+	array->fences = fences;
+
+	return array;
+}
+EXPORT_SYMBOL(fence_array_create);
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7b05dbe9b..4d51f9e83 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic_t fence_context_counter = ATOMIC_INIT(0);
+static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
 
 /**
  * fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
  * This function will return the first index of the number of fences allocated.
  * The fence context is used for setting fence->context to a unique number.
  */
-unsigned fence_context_alloc(unsigned num)
+u64 fence_context_alloc(unsigned num)
 {
 	BUG_ON(!num);
-	return atomic_add_return(num, &fence_context_counter) - num;
+	return atomic64_add_return(num, &fence_context_counter) - num;
 }
 EXPORT_SYMBOL(fence_context_alloc);
 
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
  */
 void
 fence_init(struct fence *fence, const struct fence_ops *ops,
-	     spinlock_t *lock, unsigned context, unsigned seqno)
+	     spinlock_t *lock, u64 context, unsigned seqno)
 {
 	BUG_ON(!lock);
 	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index f08cf2d83..9aaa608df 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence)
 	sync_file->num_fences = 1;
 
 	atomic_set(&sync_file->status, 1);
-	snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d",
+	snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
 		 fence->ops->get_driver_name(fence),
 		 fence->ops->get_timeline_name(fence),
 		 fence->context, fence->seqno);
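For context only (not part of the diff above): a minimal sketch of how a driver might consume the new fence-array API, combining fence_context_alloc(), fence_array_create() and the ownership rules spelled out in the kerneldoc. The helper name combine_fences() and the use of kmalloc_array() for the fences[] storage are illustrative assumptions, not code from this patch.

/*
 * Hypothetical example: aggregate two fences the caller already holds
 * references to into a single fence that signals only once both have
 * signalled (signal_on_any = false).
 */
#include <linux/slab.h>
#include <linux/fence.h>
#include <linux/fence-array.h>

static struct fence *combine_fences(struct fence *a, struct fence *b)
{
	struct fence **fences;
	struct fence_array *array;

	/*
	 * fence_array_create() takes ownership of the fences[] storage and
	 * of one reference per fence; both are dropped in
	 * fence_array_release() via kfree() and fence_put().
	 */
	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = fence_get(a);
	fences[1] = fence_get(b);

	/*
	 * A fresh 64-bit context from fence_context_alloc() keeps the
	 * aggregate fence ordered independently of its member fences.
	 */
	array = fence_array_create(2, fences, fence_context_alloc(1), 1,
				   false /* signal when all signal */);
	if (!array) {
		fence_put(fences[0]);
		fence_put(fences[1]);
		kfree(fences);
		return NULL;
	}

	return &array->base;
}

The returned &array->base behaves like any other struct fence, so it could, for example, be handed to sync_file_create() to expose the whole group to userspace as a single Sync File fd.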