From 863981e96738983919de841ec669e157e6bdaeb0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Sun, 11 Sep 2016 04:34:46 -0300
Subject: Linux-libre 4.7.1-gnu

---
 drivers/media/platform/vsp1/vsp1_dl.c | 567 +++++++++++++++++++++++++---------
 1 file changed, 429 insertions(+), 138 deletions(-)

(limited to 'drivers/media/platform/vsp1/vsp1_dl.c')

diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 1a9a58588..e238d9b93 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -18,139 +18,406 @@
 #include "vsp1.h"
 #include "vsp1_dl.h"
-#include "vsp1_pipe.h"
 
-/*
- * Global resources
- *
- * - Display-related interrupts (can be used for vblank evasion ?)
- * - Display-list enable
- * - Header-less for WPF0
- * - DL swap
- */
-
-#define VSP1_DL_BODY_SIZE		(2 * 4 * 256)
+#define VSP1_DL_NUM_ENTRIES		256
 #define VSP1_DL_NUM_LISTS		3
 
+#define VSP1_DLH_INT_ENABLE		(1 << 1)
+#define VSP1_DLH_AUTO_START		(1 << 0)
+
+struct vsp1_dl_header_list {
+	u32 num_bytes;
+	u32 addr;
+} __attribute__((__packed__));
+
+struct vsp1_dl_header {
+	u32 num_lists;
+	struct vsp1_dl_header_list lists[8];
+	u32 next_header;
+	u32 flags;
+} __attribute__((__packed__));
+
 struct vsp1_dl_entry {
 	u32 addr;
 	u32 data;
 } __attribute__((__packed__));
 
-struct vsp1_dl_list {
+/**
+ * struct vsp1_dl_body - Display list body
+ * @list: entry in the display list list of bodies
+ * @vsp1: the VSP1 device
+ * @entries: array of entries
+ * @dma: DMA address of the entries
+ * @size: size of the DMA memory in bytes
+ * @num_entries: number of stored entries
+ */
+struct vsp1_dl_body {
+	struct list_head list;
+	struct vsp1_device *vsp1;
+
+	struct vsp1_dl_entry *entries;
+	dma_addr_t dma;
 	size_t size;
-	int reg_count;
 
-	bool in_use;
+	unsigned int num_entries;
+};
 
-	struct vsp1_dl_entry *body;
+/**
+ * struct vsp1_dl_list - Display list
+ * @list: entry in the display list manager lists
+ * @dlm: the display list manager
+ * @header: display list header, NULL for headerless lists
+ * @dma: DMA address for the header
+ * @body0: first display list body
+ * @fragments: list of extra display list bodies
+ */
+struct vsp1_dl_list {
+	struct list_head list;
+	struct vsp1_dl_manager *dlm;
+
+	struct vsp1_dl_header *header;
 	dma_addr_t dma;
+
+	struct vsp1_dl_body body0;
+	struct list_head fragments;
+};
+
+enum vsp1_dl_mode {
+	VSP1_DL_MODE_HEADER,
+	VSP1_DL_MODE_HEADERLESS,
 };
 
 /**
- * struct vsp1_dl - Display List manager
+ * struct vsp1_dl_manager - Display List manager
+ * @index: index of the related WPF
+ * @mode: display list operation mode (header or headerless)
  * @vsp1: the VSP1 device
  * @lock: protects the active, queued and pending lists
- * @lists.all: array of all allocate display lists
- * @lists.active: list currently being processed (loaded) by hardware
- * @lists.queued: list queued to the hardware (written to the DL registers)
- * @lists.pending: list waiting to be queued to the hardware
- * @lists.write: list being written to by software
+ * @free: list of all free display lists
+ * @active: list currently being processed (loaded) by hardware
+ * @queued: list queued to the hardware (written to the DL registers)
+ * @pending: list waiting to be queued to the hardware
  */
-struct vsp1_dl {
+struct vsp1_dl_manager {
+	unsigned int index;
+	enum vsp1_dl_mode mode;
 	struct vsp1_device *vsp1;
 
 	spinlock_t lock;
+	struct list_head free;
+	struct vsp1_dl_list *active;
+	struct vsp1_dl_list *queued;
+	struct vsp1_dl_list *pending;
+};
 
-	size_t size;
-	dma_addr_t dma;
-	void *mem;
+/* -----------------------------------------------------------------------------
+ * Display List Body Management
+ */
+
+/*
+ * Initialize a display list body object and allocate DMA memory for the body
+ * data. The display list body object is expected to have been initialized to
+ * 0 when allocated.
+ */
+static int vsp1_dl_body_init(struct vsp1_device *vsp1,
+			     struct vsp1_dl_body *dlb, unsigned int num_entries,
+			     size_t extra_size)
+{
+	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;
 
-	struct {
-		struct vsp1_dl_list all[VSP1_DL_NUM_LISTS];
+	dlb->vsp1 = vsp1;
+	dlb->size = size;
 
-		struct vsp1_dl_list *active;
-		struct vsp1_dl_list *queued;
-		struct vsp1_dl_list *pending;
-		struct vsp1_dl_list *write;
-	} lists;
-};
+	dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
+				    GFP_KERNEL);
+	if (!dlb->entries)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * Cleanup a display list body and free the allocated DMA memory.
+ */
+static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
+{
+	dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
+}
+
+/**
+ * vsp1_dl_fragment_alloc - Allocate a display list fragment
+ * @vsp1: The VSP1 device
+ * @num_entries: The maximum number of entries that the fragment can contain
+ *
+ * Allocate a display list fragment with enough memory to contain the requested
+ * number of entries.
+ *
+ * Return a pointer to a fragment on success or NULL if memory can't be
+ * allocated.
+ */
+struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
+					    unsigned int num_entries)
+{
+	struct vsp1_dl_body *dlb;
+	int ret;
+
+	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
+	if (!dlb)
+		return NULL;
+
+	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
+	if (ret < 0) {
+		kfree(dlb);
+		return NULL;
+	}
+
+	return dlb;
+}
+
+/**
+ * vsp1_dl_fragment_free - Free a display list fragment
+ * @dlb: The fragment
+ *
+ * Free the given display list fragment and the associated DMA memory.
+ *
+ * Fragments must only be freed explicitly if they are not added to a display
+ * list, as the display list will take ownership of them and free them
+ * otherwise. Manual free typically happens at cleanup time for fragments that
+ * have been allocated but not used.
+ *
+ * Passing a NULL pointer to this function is safe; in that case no operation
+ * will be performed.
+ */
+void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
+{
+	if (!dlb)
+		return;
+
+	vsp1_dl_body_cleanup(dlb);
+	kfree(dlb);
+}
+
+/**
+ * vsp1_dl_fragment_write - Write a register to a display list fragment
+ * @dlb: The fragment
+ * @reg: The register address
+ * @data: The register value
+ *
+ * Write the given register and value to the display list fragment. The maximum
+ * number of entries that can be written in a fragment is specified when the
+ * fragment is allocated by vsp1_dl_fragment_alloc().
+ */
+void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+	dlb->entries[dlb->num_entries].addr = reg;
+	dlb->entries[dlb->num_entries].data = data;
+	dlb->num_entries++;
+}
 
 /* -----------------------------------------------------------------------------
  * Display List Transaction Management
  */
 
-static void vsp1_dl_free_list(struct vsp1_dl_list *list)
+static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
 {
-	if (!list)
-		return;
+	struct vsp1_dl_list *dl;
+	size_t header_size;
+	int ret;
+
+	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
+	if (!dl)
+		return NULL;
 
-	list->in_use = false;
+	INIT_LIST_HEAD(&dl->fragments);
+	dl->dlm = dlm;
+
+	/* Initialize the display list body and allocate DMA memory for the body
+	 * and the optional header. Both are allocated together to avoid memory
+	 * fragmentation, with the header located right after the body in
+	 * memory.
+	 */
+	header_size = dlm->mode == VSP1_DL_MODE_HEADER
+		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
+		    : 0;
+
+	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
+				header_size);
+	if (ret < 0) {
+		kfree(dl);
+		return NULL;
+	}
+
+	if (dlm->mode == VSP1_DL_MODE_HEADER) {
+		size_t header_offset = VSP1_DL_NUM_ENTRIES
+				     * sizeof(*dl->body0.entries);
+
+		dl->header = ((void *)dl->body0.entries) + header_offset;
+		dl->dma = dl->body0.dma + header_offset;
+
+		memset(dl->header, 0, sizeof(*dl->header));
+		dl->header->lists[0].addr = dl->body0.dma;
+		dl->header->flags = VSP1_DLH_INT_ENABLE;
+	}
+
+	return dl;
 }
 
-void vsp1_dl_reset(struct vsp1_dl *dl)
+static void vsp1_dl_list_free_fragments(struct vsp1_dl_list *dl)
 {
-	unsigned int i;
+	struct vsp1_dl_body *dlb, *next;
 
-	dl->lists.active = NULL;
-	dl->lists.queued = NULL;
-	dl->lists.pending = NULL;
-	dl->lists.write = NULL;
+	list_for_each_entry_safe(dlb, next, &dl->fragments, list) {
+		list_del(&dlb->list);
+		vsp1_dl_body_cleanup(dlb);
+		kfree(dlb);
+	}
+}
 
-	for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i)
-		dl->lists.all[i].in_use = false;
+static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
+{
+	vsp1_dl_body_cleanup(&dl->body0);
+	vsp1_dl_list_free_fragments(dl);
+	kfree(dl);
 }
 
-void vsp1_dl_begin(struct vsp1_dl *dl)
+/**
+ * vsp1_dl_list_get - Get a free display list
+ * @dlm: The display list manager
+ *
+ * Get a display list from the pool of free lists and return it.
+ *
+ * This function must be called without the display list manager lock held.
+ */
+struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
 {
-	struct vsp1_dl_list *list = NULL;
+	struct vsp1_dl_list *dl = NULL;
 	unsigned long flags;
-	unsigned int i;
 
-	spin_lock_irqsave(&dl->lock, flags);
+	spin_lock_irqsave(&dlm->lock, flags);
 
-	for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) {
-		if (!dl->lists.all[i].in_use) {
-			list = &dl->lists.all[i];
-			break;
-		}
+	if (!list_empty(&dlm->free)) {
+		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
+		list_del(&dl->list);
 	}
 
-	if (!list) {
-		list = dl->lists.pending;
-		dl->lists.pending = NULL;
-	}
+	spin_unlock_irqrestore(&dlm->lock, flags);
+
+	return dl;
+}
+
+/* This function must be called with the display list manager lock held. */
+static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
+{
+	if (!dl)
+		return;
+
+	vsp1_dl_list_free_fragments(dl);
+	dl->body0.num_entries = 0;
+
+	list_add_tail(&dl->list, &dl->dlm->free);
+}
+
+/**
+ * vsp1_dl_list_put - Release a display list
+ * @dl: The display list
+ *
+ * Release the display list and return it to the pool of free lists.
+ *
+ * Passing a NULL pointer to this function is safe; in that case no operation
+ * will be performed.
+ */
+void vsp1_dl_list_put(struct vsp1_dl_list *dl)
+{
+	unsigned long flags;
 
-	spin_unlock_irqrestore(&dl->lock, flags);
+	if (!dl)
+		return;
 
-	dl->lists.write = list;
+	spin_lock_irqsave(&dl->dlm->lock, flags);
+	__vsp1_dl_list_put(dl);
+	spin_unlock_irqrestore(&dl->dlm->lock, flags);
+}
 
-	list->in_use = true;
-	list->reg_count = 0;
+/**
+ * vsp1_dl_list_write - Write a register to the display list
+ * @dl: The display list
+ * @reg: The register address
+ * @data: The register value
+ *
+ * Write the given register and value to the display list. Up to 256 registers
+ * can be written per display list.
+ */
+void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
+{
+	vsp1_dl_fragment_write(&dl->body0, reg, data);
 }
 
-void vsp1_dl_add(struct vsp1_entity *e, u32 reg, u32 data)
+/**
+ * vsp1_dl_list_add_fragment - Add a fragment to the display list
+ * @dl: The display list
+ * @dlb: The fragment
+ *
+ * Add a display list body as a fragment to a display list. Registers contained
+ * in fragments are processed after registers contained in the main display
+ * list, in the order in which fragments are added.
+ *
+ * Adding a fragment to a display list passes ownership of the fragment to the
+ * list. The caller must not touch the fragment after this call, and must not
+ * free it explicitly with vsp1_dl_fragment_free().
+ *
+ * Fragments are only usable for display lists in header mode. Attempting to
+ * add a fragment to a headerless display list will return an error.
+ */
+int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
+			      struct vsp1_dl_body *dlb)
 {
-	struct vsp1_pipeline *pipe = to_vsp1_pipeline(&e->subdev.entity);
-	struct vsp1_dl *dl = pipe->dl;
-	struct vsp1_dl_list *list = dl->lists.write;
+	/* Multi-body lists are only available in header mode. */
+	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
+		return -EINVAL;
 
-	list->body[list->reg_count].addr = reg;
-	list->body[list->reg_count].data = data;
-	list->reg_count++;
+	list_add_tail(&dlb->list, &dl->fragments);
+	return 0;
 }
 
-void vsp1_dl_commit(struct vsp1_dl *dl)
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
 {
-	struct vsp1_device *vsp1 = dl->vsp1;
-	struct vsp1_dl_list *list;
+	struct vsp1_dl_manager *dlm = dl->dlm;
+	struct vsp1_device *vsp1 = dlm->vsp1;
 	unsigned long flags;
 	bool update;
 
-	list = dl->lists.write;
-	dl->lists.write = NULL;
+	spin_lock_irqsave(&dlm->lock, flags);
+
+	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
+		struct vsp1_dl_header_list *hdr = dl->header->lists;
+		struct vsp1_dl_body *dlb;
+		unsigned int num_lists = 0;
+
+		/* Fill the header with the display list bodies' addresses and
+		 * sizes. The address of the first body has already been filled
+		 * when the display list was allocated.
+		 *
+		 * In header mode the caller guarantees that the hardware is
+		 * idle at this point.
+		 */
+		hdr->num_bytes = dl->body0.num_entries
+			       * sizeof(*dl->header->lists);
+
+		list_for_each_entry(dlb, &dl->fragments, list) {
+			num_lists++;
+			hdr++;
+
+			hdr->addr = dlb->dma;
+			hdr->num_bytes = dlb->num_entries
+				       * sizeof(*dl->header->lists);
+		}
+
+		dl->header->num_lists = num_lists;
+		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
 
-	spin_lock_irqsave(&dl->lock, flags);
+		dlm->active = dl;
+		goto done;
+	}
 
 	/* Once the UPD bit has been set the hardware can start processing the
 	 * display list at any time and we can't touch the address and size
@@ -159,8 +426,8 @@ void vsp1_dl_commit(struct vsp1_dl *dl)
 	 */
 	update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
 	if (update) {
-		vsp1_dl_free_list(dl->lists.pending);
-		dl->lists.pending = list;
+		__vsp1_dl_list_put(dlm->pending);
+		dlm->pending = dl;
 		goto done;
 	}
 
@@ -168,42 +435,51 @@ void vsp1_dl_commit(struct vsp1_dl *dl)
 	 * The UPD bit will be cleared by the device when the display list is
 	 * processed.
 	 */
-	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), list->dma);
+	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
 	vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
-		   (list->reg_count * 8));
+		   (dl->body0.num_entries * sizeof(*dl->header->lists)));
 
-	vsp1_dl_free_list(dl->lists.queued);
-	dl->lists.queued = list;
+	__vsp1_dl_list_put(dlm->queued);
+	dlm->queued = dl;
 
 done:
-	spin_unlock_irqrestore(&dl->lock, flags);
+	spin_unlock_irqrestore(&dlm->lock, flags);
 }
 
 /* -----------------------------------------------------------------------------
- * Interrupt Handling
+ * Display List Manager
  */
 
-void vsp1_dl_irq_display_start(struct vsp1_dl *dl)
+/* Interrupt Handling */
+void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
 {
-	spin_lock(&dl->lock);
+	spin_lock(&dlm->lock);
 
 	/* The display start interrupt signals the end of the display list
 	 * processing by the device. The active display list, if any, won't be
 	 * accessed anymore and can be reused.
 	 */
-	if (dl->lists.active) {
-		vsp1_dl_free_list(dl->lists.active);
-		dl->lists.active = NULL;
-	}
+	__vsp1_dl_list_put(dlm->active);
+	dlm->active = NULL;
 
-	spin_unlock(&dl->lock);
+	spin_unlock(&dlm->lock);
 }
 
-void vsp1_dl_irq_frame_end(struct vsp1_dl *dl)
+void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
 {
-	struct vsp1_device *vsp1 = dl->vsp1;
+	struct vsp1_device *vsp1 = dlm->vsp1;
+
+	spin_lock(&dlm->lock);
+
+	__vsp1_dl_list_put(dlm->active);
+	dlm->active = NULL;
 
-	spin_lock(&dl->lock);
+	/* Header mode is used for mem-to-mem pipelines only. We don't need to
+	 * perform any operation as there can't be any new display list queued
+	 * in that case.
+	 */
+	if (dlm->mode == VSP1_DL_MODE_HEADER)
+		goto done;
 
 	/* The UPD bit set indicates that the commit operation raced with the
 	 * interrupt and occurred after the frame end event and UPD clear but
@@ -216,42 +492,39 @@ void vsp1_dl_irq_frame_end(struct vsp1_dl *dl)
 	/* The device starts processing the queued display list right after the
 	 * frame end interrupt. The display list thus becomes active.
 	 */
-	if (dl->lists.queued) {
-		WARN_ON(dl->lists.active);
-		dl->lists.active = dl->lists.queued;
-		dl->lists.queued = NULL;
+	if (dlm->queued) {
+		dlm->active = dlm->queued;
+		dlm->queued = NULL;
 	}
 
 	/* Now that the UPD bit has been cleared we can queue the next display
 	 * list to the hardware if one has been prepared.
 	 */
-	if (dl->lists.pending) {
-		struct vsp1_dl_list *list = dl->lists.pending;
+	if (dlm->pending) {
+		struct vsp1_dl_list *dl = dlm->pending;
 
-		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), list->dma);
+		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
 		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
-			   (list->reg_count * 8));
+			   (dl->body0.num_entries *
+			    sizeof(*dl->header->lists)));
 
-		dl->lists.queued = list;
-		dl->lists.pending = NULL;
+		dlm->queued = dl;
+		dlm->pending = NULL;
 	}
 
 done:
-	spin_unlock(&dl->lock);
+	spin_unlock(&dlm->lock);
 }
 
-/* -----------------------------------------------------------------------------
- * Hardware Setup
- */
-
-void vsp1_dl_setup(struct vsp1_device *vsp1)
+/* Hardware Setup */
+void vsp1_dlm_setup(struct vsp1_device *vsp1)
 {
 	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
 		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
 		 | VI6_DL_CTRL_DLE;
 
-	/* The DRM pipeline operates with header-less display lists in
-	 * Continuous Frame Mode.
+	/* The DRM pipeline operates with display lists in Continuous Frame
+	 * Mode, all other pipelines use manual start.
 	 */
 	if (vsp1->drm)
 		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
@@ -260,46 +533,64 @@ void vsp1_dl_setup(struct vsp1_device *vsp1)
 	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
 }
 
-/* -----------------------------------------------------------------------------
- * Initialization and Cleanup
- */
+void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dlm->lock, flags);
 
-struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1)
+	__vsp1_dl_list_put(dlm->active);
+	__vsp1_dl_list_put(dlm->queued);
+	__vsp1_dl_list_put(dlm->pending);
+
+	spin_unlock_irqrestore(&dlm->lock, flags);
+
+	dlm->active = NULL;
+	dlm->queued = NULL;
+	dlm->pending = NULL;
+}
+
+struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
+					unsigned int index,
+					unsigned int prealloc)
 {
-	struct vsp1_dl *dl;
+	struct vsp1_dl_manager *dlm;
 	unsigned int i;
 
-	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
-	if (!dl)
+	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
+	if (!dlm)
 		return NULL;
 
-	spin_lock_init(&dl->lock);
+	dlm->index = index;
+	dlm->mode = index == 0 && !vsp1->info->uapi
+		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
+	dlm->vsp1 = vsp1;
 
-	dl->vsp1 = vsp1;
-	dl->size = VSP1_DL_BODY_SIZE * ARRAY_SIZE(dl->lists.all);
+	spin_lock_init(&dlm->lock);
+	INIT_LIST_HEAD(&dlm->free);
 
-	dl->mem = dma_alloc_wc(vsp1->dev, dl->size, &dl->dma,
-			       GFP_KERNEL);
-	if (!dl->mem) {
-		kfree(dl);
-		return NULL;
-	}
+	for (i = 0; i < prealloc; ++i) {
+		struct vsp1_dl_list *dl;
 
-	for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) {
-		struct vsp1_dl_list *list = &dl->lists.all[i];
+		dl = vsp1_dl_list_alloc(dlm);
+		if (!dl)
+			return NULL;
 
-		list->size = VSP1_DL_BODY_SIZE;
-		list->reg_count = 0;
-		list->in_use = false;
-		list->dma = dl->dma + VSP1_DL_BODY_SIZE * i;
-		list->body = dl->mem + VSP1_DL_BODY_SIZE * i;
+		list_add_tail(&dl->list, &dlm->free);
 	}
 
-	return dl;
+	return dlm;
 }
 
-void vsp1_dl_destroy(struct vsp1_dl *dl)
+void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
 {
-	dma_free_wc(dl->vsp1->dev, dl->size, dl->mem, dl->dma);
-	kfree(dl);
+	struct vsp1_dl_list *dl, *next;
+
+	if (!dlm)
+		return;
+
+	list_for_each_entry_safe(dl, next, &dlm->free, list) {
+		list_del(&dl->list);
+		vsp1_dl_list_free(dl);
+	}
}
-- 
cgit v1.2.3-54-g00ecf
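
Editor's note: for readers skimming the patch, the reworked API amounts to one manager per WPF with a pool of reusable display lists. The sketch below is not part of the patch; it only strings together functions the patch introduces, and the manager index (0), pool size, and register address/value are hypothetical placeholders.

/* Hypothetical driver-side call sequence for the new display list API.
 * Error unwinding is trimmed for brevity.
 */
static int example_queue_frame(struct vsp1_device *vsp1)
{
	struct vsp1_dl_manager *dlm;
	struct vsp1_dl_list *dl;

	/* One manager per WPF; index 0 and a pool of 3 lists are examples. */
	dlm = vsp1_dlm_create(vsp1, 0, VSP1_DL_NUM_LISTS);
	if (!dlm)
		return -ENOMEM;

	/* Borrow a free list and fill it with register writes. */
	dl = vsp1_dl_list_get(dlm);
	if (!dl)
		return -EBUSY;

	vsp1_dl_list_write(dl, 0x0100, 0);	/* placeholder reg/value */

	/* Committing passes the list to the hardware (or queues it as
	 * pending); the IRQ handlers vsp1_dlm_irq_frame_end() and
	 * vsp1_dlm_irq_display_start() return processed lists to the free
	 * pool. A list that is never committed would instead be released
	 * with vsp1_dl_list_put(dl).
	 */
	vsp1_dl_list_commit(dl);

	return 0;
}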