path: root/drivers/gpu/drm/drm_fops.c
author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-06-10 05:30:17 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-06-10 05:30:17 -0300
commit    d635711daa98be86d4c7fd01499c34f566b54ccb (patch)
tree      aa5cc3760a27c3d57146498cb82fa549547de06c /drivers/gpu/drm/drm_fops.c
parent    c91265cd0efb83778f015b4d4b1129bd2cfd075e (diff)
Linux-libre 4.6.2-gnu
Diffstat (limited to 'drivers/gpu/drm/drm_fops.c')
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 305
1 file changed, 275 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 1ea8790e5..aeef58ed3 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_fops.c
* File operations for DRM
*
@@ -44,6 +44,46 @@
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
+/**
+ * DOC: file operations
+ *
+ * Drivers must define the file operations structure that forms the DRM
+ * userspace API entry point, even though most of those operations are
+ * implemented in the DRM core. The mandatory functions are drm_open(),
+ * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
+ * Drivers which implement private ioctls that require 32/64 bit compatibility
+ * support must provide their own .compat_ioctl() handler that processes
+ * private ioctls and calls drm_compat_ioctl() for core ioctls.
+ *
+ * In addition drm_read() and drm_poll() provide support for DRM events. DRM
+ * events are a generic and extensible means to send asynchronous events to
+ * userspace through the file descriptor. They are used to send vblank events
+ * and page flip completions by the KMS API, but drivers can also use them for
+ * their own needs, e.g. to signal completion of rendering.
+ *
+ * The memory mapping implementation will vary depending on how the driver
+ * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
+ * function, modern drivers should use one of the provided memory-manager
+ * specific implementations. For GEM-based drivers this is drm_gem_mmap().
+ *
+ * No other file operations are supported by the DRM userspace API. Overall the
+ * following is an example #file_operations structure:
+ *
+ * static const struct file_operations example_drm_fops = {
+ * .owner = THIS_MODULE,
+ * .open = drm_open,
+ * .release = drm_release,
+ * .unlocked_ioctl = drm_ioctl,
+ * #ifdef CONFIG_COMPAT
+ * .compat_ioctl = drm_compat_ioctl,
+ * #endif
+ * .poll = drm_poll,
+ * .read = drm_read,
+ * .llseek = no_llseek,
+ * .mmap = drm_gem_mmap,
+ * };
+ */
+
static int drm_open_helper(struct file *filp, struct drm_minor *minor);
static int drm_setup(struct drm_device * dev)
@@ -67,15 +107,17 @@ static int drm_setup(struct drm_device * dev)
}
/**
- * Open file.
+ * drm_open - open method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
*
- * \param inode device inode
- * \param filp file pointer.
- * \return zero on success or a negative number on failure.
+ * This function must be used by drivers as their .open() #file_operations
+ * method. It looks up the correct DRM device and instantiates all the per-file
+ * resources for it.
+ *
+ * RETURNS:
*
- * Searches the DRM device with the same minor number, calls open_helper(), and
- * increments the device open count. If the open count was previous at zero,
- * i.e., it's the first that the device is open, then calls setup().
+ * 0 on success or negative errno value on failure.
*/
int drm_open(struct inode *inode, struct file *filp)
{
@@ -112,7 +154,7 @@ err_undo:
}
EXPORT_SYMBOL(drm_open);
-/**
+/*
* Check whether DRI will run on this CPU.
*
* \return non-zero if the DRI will run on this CPU, or zero otherwise.
@@ -125,7 +167,7 @@ static int drm_cpu_valid(void)
return 1;
}
-/**
+/*
* drm_new_set_master - Allocate a new master object and become master for the
* associated master realm.
*
@@ -179,7 +221,7 @@ out_err:
return ret;
}
-/**
+/*
* Called whenever a process opens /dev/drm.
*
* \param filp file pointer.
@@ -222,6 +264,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
INIT_LIST_HEAD(&priv->fbs);
mutex_init(&priv->fbs_lock);
INIT_LIST_HEAD(&priv->blobs);
+ INIT_LIST_HEAD(&priv->pending_event_list);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
@@ -311,18 +354,16 @@ static void drm_events_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_pending_event *e, *et;
- struct drm_pending_vblank_event *v, *vt;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- /* Remove pending flips */
- list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
- if (v->base.file_priv == file_priv) {
- list_del(&v->base.link);
- drm_vblank_put(dev, v->pipe);
- v->base.destroy(&v->base);
- }
+ /* Unlink pending events */
+ list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
+ pending_link) {
+ list_del(&e->pending_link);
+ e->file_priv = NULL;
+ }
/* Remove unconsumed events */
list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
@@ -333,7 +374,7 @@ static void drm_events_release(struct drm_file *file_priv)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-/**
+/*
* drm_legacy_dev_reinit
*
* Reinitializes a legacy/ums drm device in it's lastclose function.
@@ -350,7 +391,7 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
dev->if_version = 0;
}
-/**
+/*
* Take down the DRM device.
*
* \param dev DRM device structure.
@@ -387,16 +428,17 @@ int drm_lastclose(struct drm_device * dev)
}
/**
- * Release file.
+ * drm_release - release method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
*
- * \param inode device inode
- * \param file_priv DRM file private.
- * \return zero on success or a negative number on failure.
+ * This function must be used by drivers as their .release() #file_operations
+ * method. It frees any resources associated with the open file, and if this is
+ * the last open file for the DRM device also proceeds to call drm_lastclose().
*
- * If the hardware lock is held then free it, and take it again for the kernel
- * context since it's necessary to reclaim buffers. Unlink the file private
- * data from its list and free it. Decreases the open count and if it reaches
- * zero calls drm_lastclose().
+ * RETURNS:
+ *
+ * Always succeeds and returns 0.
*/
int drm_release(struct inode *inode, struct file *filp)
{
@@ -451,7 +493,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (file_priv->is_master) {
struct drm_master *master = file_priv->master;
- /**
+ /*
* Since the master is disappearing, so is the
* possibility to lock.
*/
@@ -508,6 +550,32 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);
+/**
+ * drm_read - read method for DRM file
+ * @filp: file pointer
+ * @buffer: userspace destination pointer for the read
+ * @count: count in bytes to read
+ * @offset: offset to read
+ *
+ * This function must be used by drivers as their .read() #file_operations
+ * method iff they use DRM events for asynchronous signalling to userspace.
+ * Since events are used by the KMS API for vblank and page flip completion this
+ * means all modern display drivers must use it.
+ *
+ * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
+ * must set the .llseek() #file_operations entry to no_llseek(). Polling support is
+ * provided by drm_poll().
+ *
+ * This function will only ever read a full event. Therefore userspace must
+ * supply a big enough buffer to fit any event to ensure forward progress. Since
+ * the maximum event space is currently 4K it's recommended to just use that for
+ * safety.
+ *
+ * RETURNS:
+ *
+ * Number of bytes read (always aligned to full events, and can be 0) or a
+ * negative error code on failure.
+ */
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset)
{
@@ -578,6 +646,22 @@ put_back_event:
}
EXPORT_SYMBOL(drm_read);
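
A minimal userspace sketch of that pattern (purely illustrative, not part of
this patch; the header path and function name are assumptions): read a batch
of events into a 4 KiB buffer, the current per-file event space, then walk the
packed events using the length field of each struct drm_event header.

#include <string.h>
#include <unistd.h>
#include <drm/drm.h>	/* struct drm_event, DRM_EVENT_* (installed uapi header) */

static void drain_drm_events(int fd)
{
	char buf[4096];	/* big enough for any event, see the comment above */
	ssize_t len = read(fd, buf, sizeof(buf));
	ssize_t pos = 0;

	while (len > 0 && pos + (ssize_t)sizeof(struct drm_event) <= len) {
		struct drm_event ev;

		memcpy(&ev, buf + pos, sizeof(ev));	/* type + length header */
		/* dispatch on ev.type here, e.g. DRM_EVENT_VBLANK or
		 * DRM_EVENT_FLIP_COMPLETE */
		pos += ev.length;
	}
}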
+/**
+ * drm_poll - poll method for DRM file
+ * @filp: file pointer
+ * @wait: poll waiter table
+ *
+ * This function must be used by drivers as their .poll() #file_operations
+ * method iff they use DRM events for asynchronous signalling to userspace.
+ * Since events are used by the KMS API for vblank and page flip completion this
+ * means all modern display drivers must use it.
+ *
+ * See also drm_read().
+ *
+ * RETURNS:
+ *
+ * Mask of POLL flags indicating the current status of the file.
+ */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
@@ -591,3 +675,164 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
return mask;
}
EXPORT_SYMBOL(drm_poll);
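
A matching userspace sketch (illustrative only, names are assumptions): block
with poll(2) until the DRM file descriptor becomes readable, which is exactly
the readiness drm_poll() signals, then hand the fd to a read loop like the one
above.

#include <poll.h>

/* Returns non-zero when drm_read() will not block, 0 on timeout and a
 * negative value on error. */
static int wait_for_drm_event(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int ret = poll(&pfd, 1, timeout_ms);

	return ret > 0 ? (pfd.revents & POLLIN) : ret;
}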
+
+/**
+ * drm_event_reserve_init_locked - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed in event for eventual delivery. If the event
+ * doesn't get delivered (because the IOCTL fails later on, before queuing up
+ * anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embed @p into a larger structure, it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * This is the locked version of drm_event_reserve_init() for callers which
+ * already hold dev->event_lock.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init_locked(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_pending_event *p,
+ struct drm_event *e)
+{
+ if (file_priv->event_space < e->length)
+ return -ENOMEM;
+
+ file_priv->event_space -= e->length;
+
+ p->event = e;
+ list_add(&p->pending_link, &file_priv->pending_event_list);
+ p->file_priv = file_priv;
+
+ /* we *could* pass this in as arg, but everyone uses kfree: */
+ p->destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_event_reserve_init_locked);
+
+/**
+ * drm_event_reserve_init - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed in event for eventual delivery. If the event
+ * doesn't get delivered (because the IOCTL fails later on, before queuing up
+ * anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embed @p into a larger structure, it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * Callers which already hold dev->event_lock should use
+ * drm_event_reserve_init_locked() instead.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_pending_event *p,
+ struct drm_event *e)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_event_reserve_init);
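
A driver-side sketch of the intended usage (hypothetical structure and function
names, not taken from this patch): embed the drm_pending_event as the first
member of a kmalloc'ed wrapper, fill in the userspace-visible drm_event header,
then reserve the space before queuing any work.

#include <linux/slab.h>
#include <drm/drmP.h>	/* declares drm_event_reserve_init() and friends in 4.6 */

struct my_pending_event {
	struct drm_pending_event base;	/* must be the first member */
	struct drm_event_vblank event;	/* data actually copied to userspace */
};

static int my_queue_event(struct drm_device *dev, struct drm_file *file_priv,
			  u64 user_data, struct my_pending_event **out)
{
	struct my_pending_event *p;
	int ret;

	p = kzalloc(sizeof(*p), GFP_KERNEL);	/* kfree() is used to destroy it */
	if (!p)
		return -ENOMEM;

	p->event.base.type = DRM_EVENT_VBLANK;
	p->event.base.length = sizeof(p->event);
	p->event.user_data = user_data;

	/* Reserves per-file event space and links p to file_priv. */
	ret = drm_event_reserve_init(dev, file_priv, &p->base, &p->event.base);
	if (ret) {
		kfree(p);	/* nothing reserved yet, plain kfree is enough */
		return ret;
	}

	/* On success: drm_send_event() upon completion, or
	 * drm_event_cancel_free() if the ioctl bails out before queuing work. */
	*out = p;
	return 0;
}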
+
+/**
+ * drm_event_cancel_free - free a DRM event and release its space
+ * @dev: DRM device
+ * @p: tracking structure for the pending event
+ *
+ * This function frees the event @p initialized with drm_event_reserve_init()
+ * and releases any allocated space.
+ */
+void drm_event_cancel_free(struct drm_device *dev,
+ struct drm_pending_event *p)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (p->file_priv) {
+ p->file_priv->event_space += p->event->length;
+ list_del(&p->pending_link);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ p->destroy(p);
+}
+EXPORT_SYMBOL(drm_event_cancel_free);
+
+/**
+ * drm_send_event_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * dev->event_lock, see drm_send_event() for the unlocked version.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+{
+ assert_spin_locked(&dev->event_lock);
+
+ if (!e->file_priv) {
+ e->destroy(e);
+ return;
+ }
+
+ list_del(&e->pending_link);
+ list_add_tail(&e->link,
+ &e->file_priv->event_list);
+ wake_up_interruptible(&e->file_priv->event_wait);
+}
+EXPORT_SYMBOL(drm_send_event_locked);
+
+/**
+ * drm_send_event - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. This function acquires dev->event_lock,
+ * see drm_send_event_locked() for callers which already hold this lock.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+ drm_send_event_locked(dev, e);
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_send_event);
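
To round off the flow, a hedged sketch of the completion side (hypothetical
names, reusing struct my_pending_event from the sketch above): a driver's
interrupt handler that already holds dev->event_lock timestamps the event and
hands it to drm_send_event_locked(); code running without the lock would call
drm_send_event() instead.

static void my_complete_event(struct drm_device *dev,
			      struct my_pending_event *p)
{
	struct timeval now;

	assert_spin_locked(&dev->event_lock);

	do_gettimeofday(&now);
	p->event.tv_sec = now.tv_sec;
	p->event.tv_usec = now.tv_usec;

	/* Safe even if the DRM file was closed in the meantime: the core then
	 * simply destroys the event instead of queuing it. */
	drm_send_event_locked(dev, &p->base);
}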