Diffstat (limited to 'drivers/crypto/ccp/ccp-dev.c')
-rw-r--r--   drivers/crypto/ccp/ccp-dev.c | 471
1 file changed, 148 insertions(+), 323 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 861bacc1b..4dbc18727 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -16,6 +16,8 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/rwlock_types.h>
+#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
@@ -37,20 +39,107 @@ struct ccp_tasklet_data {
struct ccp_cmd *cmd;
};
-static struct ccp_device *ccp_dev;
-static inline struct ccp_device *ccp_get_device(void)
+/* List of CCPs, the read-write access lock, and access functions
+ *
+ * Lock structure: get ccp_unit_lock for reading whenever we need to
+ * examine the CCP list. While holding it for reading we can acquire
+ * the RR lock to update the round-robin next-CCP pointer. The unit lock
+ * must be acquired before the RR lock.
+ *
+ * If the unit lock is acquired for writing, we have total control over
+ * the list, so there's no value in getting the RR lock.
+ */
+static DEFINE_RWLOCK(ccp_unit_lock);
+static LIST_HEAD(ccp_units);
+
+/* Round-robin counter */
+static DEFINE_SPINLOCK(ccp_rr_lock);
+static struct ccp_device *ccp_rr;
+
+/* Ever-increasing value to produce unique unit numbers */
+static atomic_t ccp_unit_ordinal;
+unsigned int ccp_increment_unit_ordinal(void)
{
- return ccp_dev;
+ return atomic_inc_return(&ccp_unit_ordinal);
}
-static inline void ccp_add_device(struct ccp_device *ccp)
+/**
+ * ccp_add_device - add a CCP device to the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Put this CCP on the unit list, which makes it available
+ * for use.
+ */
+void ccp_add_device(struct ccp_device *ccp)
{
- ccp_dev = ccp;
+ unsigned long flags;
+
+ write_lock_irqsave(&ccp_unit_lock, flags);
+ list_add_tail(&ccp->entry, &ccp_units);
+ if (!ccp_rr)
+ /* We already have the list lock (we're first) so this
+ * pointer can't change on us. Set its initial value.
+ */
+ ccp_rr = ccp;
+ write_unlock_irqrestore(&ccp_unit_lock, flags);
}
-static inline void ccp_del_device(struct ccp_device *ccp)
+/**
+ * ccp_del_device - remove a CCP device from the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Remove this unit from the list of devices. If the next device
+ * up for use is this one, adjust the pointer. If this is the last
+ * device, NULL the pointer.
+ */
+void ccp_del_device(struct ccp_device *ccp)
{
- ccp_dev = NULL;
+ unsigned long flags;
+
+ write_lock_irqsave(&ccp_unit_lock, flags);
+ if (ccp_rr == ccp) {
+ /* ccp_unit_lock is read/write; any read access
+ * will be suspended while we make changes to the
+ * list and RR pointer.
+ */
+ if (list_is_last(&ccp_rr->entry, &ccp_units))
+ ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
+ entry);
+ else
+ ccp_rr = list_next_entry(ccp_rr, entry);
+ }
+ list_del(&ccp->entry);
+ if (list_empty(&ccp_units))
+ ccp_rr = NULL;
+ write_unlock_irqrestore(&ccp_unit_lock, flags);
+}
+
+static struct ccp_device *ccp_get_device(void)
+{
+ unsigned long flags;
+ struct ccp_device *dp = NULL;
+
+ /* We round-robin through the unit list.
+ * The (ccp_rr) pointer refers to the next unit to use.
+ */
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ if (!list_empty(&ccp_units)) {
+ spin_lock(&ccp_rr_lock);
+ dp = ccp_rr;
+ if (list_is_last(&ccp_rr->entry, &ccp_units))
+ ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
+ entry);
+ else
+ ccp_rr = list_next_entry(ccp_rr, entry);
+ spin_unlock(&ccp_rr_lock);
+ }
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return dp;
}
/**
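
The round-robin accessors above rely on the documented lock ordering: the unit rwlock is always taken (shared) before the RR spinlock. A minimal userspace analogue of that scheme, using pthreads in place of the kernel primitives (the names and the fixed three-unit list are illustrative only, not part of the patch):

        #include <pthread.h>
        #include <stdio.h>

        /* Outer read-write lock guards the unit list; inner mutex guards the
         * round-robin cursor. Lock order: rwlock first, then mutex, matching
         * ccp_unit_lock -> ccp_rr_lock above.
         */
        static pthread_rwlock_t unit_lock = PTHREAD_RWLOCK_INITIALIZER;
        static pthread_mutex_t rr_lock = PTHREAD_MUTEX_INITIALIZER;

        static const char *units[] = { "ccp-1", "ccp-2", "ccp-3" };
        static int nunits = 3;
        static int rr_next;                     /* next unit to hand out */

        static const char *get_unit(void)
        {
                const char *dp = NULL;

                pthread_rwlock_rdlock(&unit_lock);      /* list can't change */
                if (nunits) {
                        pthread_mutex_lock(&rr_lock);   /* cursor update is exclusive */
                        dp = units[rr_next];
                        rr_next = (rr_next + 1) % nunits;
                        pthread_mutex_unlock(&rr_lock);
                }
                pthread_rwlock_unlock(&unit_lock);

                return dp;
        }

        int main(void)
        {
                for (int i = 0; i < 5; i++)
                        printf("%s\n", get_unit()); /* ccp-1 ccp-2 ccp-3 ccp-1 ccp-2 */
                return 0;
        }

Multiple readers can select a unit concurrently; only the brief cursor update is serialized, and a writer (add/delete) excludes everyone.
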
@@ -60,14 +149,41 @@ static inline void ccp_del_device(struct ccp_device *ccp)
*/
int ccp_present(void)
{
- if (ccp_get_device())
- return 0;
+ unsigned long flags;
+ int ret;
- return -ENODEV;
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ ret = list_empty(&ccp_units);
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);
/**
+ * ccp_version - get the version of the CCP device
+ *
+ * Returns the version from the first unit on the list,
+ * or zero if no CCP device is present.
+ */
+unsigned int ccp_version(void)
+{
+ struct ccp_device *dp;
+ unsigned long flags;
+ int ret = 0;
+
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ if (!list_empty(&ccp_units)) {
+ dp = list_first_entry(&ccp_units, struct ccp_device, entry);
+ ret = dp->vdata->version;
+ }
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ccp_version);
+
+/**
* ccp_enqueue_cmd - queue an operation for processing by the CCP
*
* @cmd: ccp_cmd struct to be processed
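
With ccp_present() and ccp_version() exported, client code can probe for a coprocessor before queueing work. A hypothetical consumer sketch (my_driver_check_ccp is made up for illustration, and the declarations are assumed to live in <linux/ccp.h> alongside ccp_present(); only the two exported calls come from this file):

        #include <linux/ccp.h>
        #include <linux/errno.h>
        #include <linux/kernel.h>

        /* Hypothetical helper: bail out early when no CCP is registered. */
        static int my_driver_check_ccp(void)
        {
                if (ccp_present() != 0)
                        return -ENODEV;         /* no unit on the list */

                /* Non-zero only when at least one unit is present */
                pr_info("CCP version %u available\n", ccp_version());
                return 0;
        }
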
@@ -221,7 +337,12 @@ static void ccp_do_cmd_complete(unsigned long data)
complete(&tdata->completion);
}
-static int ccp_cmd_queue_thread(void *data)
+/**
+ * ccp_cmd_queue_thread - the kernel thread that services a CCP command queue
+ *
+ * @data: the ccp_cmd_queue to service, passed as an opaque pointer
+ */
+int ccp_cmd_queue_thread(void *data)
{
struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
struct ccp_cmd *cmd;
@@ -257,35 +378,6 @@ static int ccp_cmd_queue_thread(void *data)
return 0;
}
-static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
- struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
- u32 trng_value;
- int len = min_t(int, sizeof(trng_value), max);
-
- /*
- * Locking is provided by the caller so we can update device
- * hwrng-related fields safely
- */
- trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
- if (!trng_value) {
- /* Zero is returned if not data is available or if a
- * bad-entropy error is present. Assume an error if
- * we exceed TRNG_RETRIES reads of zero.
- */
- if (ccp->hwrng_retries++ > TRNG_RETRIES)
- return -EIO;
-
- return 0;
- }
-
- /* Reset the counter and save the rng value */
- ccp->hwrng_retries = 0;
- memcpy(data, &trng_value, len);
-
- return len;
-}
-
/**
* ccp_alloc_struct - allocate and initialize the ccp_device struct
*
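
The deleted ccp_trng_read() implemented the hwrng read contract: return the number of bytes copied, zero when no data is available yet, or a negative errno on a hard failure. A simplified sketch of a provider following that contract (the demo names and the constant byte source are placeholders; the real code read TRNG_OUT_REG and tracked retries, as shown above):

        #include <linux/hw_random.h>
        #include <linux/kernel.h>
        #include <linux/string.h>

        static int demo_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
        {
                u32 val = 0x12345678;   /* placeholder; a real hardware read goes here */
                int len = min_t(int, sizeof(val), max);

                memcpy(data, &val, len);
                return len;             /* bytes produced; 0 = try again, <0 = error */
        }

        static struct hwrng demo_rng = {
                .name = "demo-rng",
                .read = demo_trng_read,
        };

A provider registers with hwrng_register(&demo_rng), exactly as the (also removed) ccp_init() does further down.
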
@@ -309,253 +401,11 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
ccp->ksb_count = KSB_COUNT;
ccp->ksb_start = 0;
- return ccp;
-}
-
-/**
- * ccp_init - initialize the CCP device
- *
- * @ccp: ccp_device struct
- */
-int ccp_init(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct ccp_cmd_queue *cmd_q;
- struct dma_pool *dma_pool;
- char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
- unsigned int qmr, qim, i;
- int ret;
-
- /* Find available queues */
- qim = 0;
- qmr = ioread32(ccp->io_regs + Q_MASK_REG);
- for (i = 0; i < MAX_HW_QUEUES; i++) {
- if (!(qmr & (1 << i)))
- continue;
-
- /* Allocate a dma pool for this queue */
- snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
- dma_pool = dma_pool_create(dma_pool_name, dev,
- CCP_DMAPOOL_MAX_SIZE,
- CCP_DMAPOOL_ALIGN, 0);
- if (!dma_pool) {
- dev_err(dev, "unable to allocate dma pool\n");
- ret = -ENOMEM;
- goto e_pool;
- }
-
- cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
- ccp->cmd_q_count++;
-
- cmd_q->ccp = ccp;
- cmd_q->id = i;
- cmd_q->dma_pool = dma_pool;
-
- /* Reserve 2 KSB regions for the queue */
- cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
- cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
- ccp->ksb_count -= 2;
-
- /* Preset some register values and masks that are queue
- * number dependent
- */
- cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
- (CMD_Q_STATUS_INCR * i);
- cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
- (CMD_Q_STATUS_INCR * i);
- cmd_q->int_ok = 1 << (i * 2);
- cmd_q->int_err = 1 << ((i * 2) + 1);
-
- cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
-
- init_waitqueue_head(&cmd_q->int_queue);
-
- /* Build queue interrupt mask (two interrupts per queue) */
- qim |= cmd_q->int_ok | cmd_q->int_err;
+ ccp->ord = ccp_increment_unit_ordinal();
+ snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
+ snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
-#ifdef CONFIG_ARM64
- /* For arm64 set the recommended queue cache settings */
- iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
- (CMD_Q_CACHE_INC * i));
-#endif
-
- dev_dbg(dev, "queue #%u available\n", i);
- }
- if (ccp->cmd_q_count == 0) {
- dev_notice(dev, "no command queues available\n");
- ret = -EIO;
- goto e_pool;
- }
- dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
-
- /* Disable and clear interrupts until ready */
- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- ioread32(cmd_q->reg_int_status);
- ioread32(cmd_q->reg_status);
- }
- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
-
- /* Request an irq */
- ret = ccp->get_irq(ccp);
- if (ret) {
- dev_err(dev, "unable to allocate an IRQ\n");
- goto e_pool;
- }
-
- /* Initialize the queues used to wait for KSB space and suspend */
- init_waitqueue_head(&ccp->ksb_queue);
- init_waitqueue_head(&ccp->suspend_queue);
-
- /* Create a kthread for each queue */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- struct task_struct *kthread;
-
- cmd_q = &ccp->cmd_q[i];
-
- kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
- "ccp-q%u", cmd_q->id);
- if (IS_ERR(kthread)) {
- dev_err(dev, "error creating queue thread (%ld)\n",
- PTR_ERR(kthread));
- ret = PTR_ERR(kthread);
- goto e_kthread;
- }
-
- cmd_q->kthread = kthread;
- wake_up_process(kthread);
- }
-
- /* Register the RNG */
- ccp->hwrng.name = "ccp-rng";
- ccp->hwrng.read = ccp_trng_read;
- ret = hwrng_register(&ccp->hwrng);
- if (ret) {
- dev_err(dev, "error registering hwrng (%d)\n", ret);
- goto e_kthread;
- }
-
- /* Make the device struct available before enabling interrupts */
- ccp_add_device(ccp);
-
- /* Enable interrupts */
- iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
-
- return 0;
-
-e_kthread:
- for (i = 0; i < ccp->cmd_q_count; i++)
- if (ccp->cmd_q[i].kthread)
- kthread_stop(ccp->cmd_q[i].kthread);
-
- ccp->free_irq(ccp);
-
-e_pool:
- for (i = 0; i < ccp->cmd_q_count; i++)
- dma_pool_destroy(ccp->cmd_q[i].dma_pool);
-
- return ret;
-}
-
-/**
- * ccp_destroy - tear down the CCP device
- *
- * @ccp: ccp_device struct
- */
-void ccp_destroy(struct ccp_device *ccp)
-{
- struct ccp_cmd_queue *cmd_q;
- struct ccp_cmd *cmd;
- unsigned int qim, i;
-
- /* Remove general access to the device struct */
- ccp_del_device(ccp);
-
- /* Unregister the RNG */
- hwrng_unregister(&ccp->hwrng);
-
- /* Stop the queue kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++)
- if (ccp->cmd_q[i].kthread)
- kthread_stop(ccp->cmd_q[i].kthread);
-
- /* Build queue interrupt mask (two interrupt masks per queue) */
- qim = 0;
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
- qim |= cmd_q->int_ok | cmd_q->int_err;
- }
-
- /* Disable and clear interrupts */
- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- ioread32(cmd_q->reg_int_status);
- ioread32(cmd_q->reg_status);
- }
- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
-
- ccp->free_irq(ccp);
-
- for (i = 0; i < ccp->cmd_q_count; i++)
- dma_pool_destroy(ccp->cmd_q[i].dma_pool);
-
- /* Flush the cmd and backlog queue */
- while (!list_empty(&ccp->cmd)) {
- /* Invoke the callback directly with an error code */
- cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
- list_del(&cmd->entry);
- cmd->callback(cmd->data, -ENODEV);
- }
- while (!list_empty(&ccp->backlog)) {
- /* Invoke the callback directly with an error code */
- cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
- list_del(&cmd->entry);
- cmd->callback(cmd->data, -ENODEV);
- }
-}
-
-/**
- * ccp_irq_handler - handle interrupts generated by the CCP device
- *
- * @irq: the irq associated with the interrupt
- * @data: the data value supplied when the irq was created
- */
-irqreturn_t ccp_irq_handler(int irq, void *data)
-{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- struct ccp_cmd_queue *cmd_q;
- u32 q_int, status;
- unsigned int i;
-
- status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- q_int = status & (cmd_q->int_ok | cmd_q->int_err);
- if (q_int) {
- cmd_q->int_status = status;
- cmd_q->q_status = ioread32(cmd_q->reg_status);
- cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
- /* On error, only save the first error value */
- if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
- cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
- cmd_q->int_rcvd = 1;
-
- /* Acknowledge the interrupt and wake the kthread */
- iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
- wake_up_interruptible(&cmd_q->int_queue);
- }
- }
-
- return IRQ_HANDLED;
+ return ccp;
}
#ifdef CONFIG_PM
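
ccp_alloc_struct() now derives per-unit names from an atomic ordinal, so concurrent probes can never hand out duplicate names. The same pattern in a self-contained userspace sketch using C11 atomics (all names illustrative):

        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_uint unit_ordinal;        /* ever-increasing, never reused */

        static unsigned int next_ordinal(void)
        {
                /* +1 so numbering starts at 1, like atomic_inc_return() */
                return atomic_fetch_add(&unit_ordinal, 1) + 1;
        }

        int main(void)
        {
                char name[16], rngname[20];
                unsigned int ord = next_ordinal();

                snprintf(name, sizeof(name), "ccp-%u", ord);
                snprintf(rngname, sizeof(rngname), "ccp-%u-rng", ord);
                printf("%s %s\n", name, rngname);       /* ccp-1 ccp-1-rng */
                return 0;
        }
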
@@ -577,41 +427,22 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
}
#endif
-#ifdef CONFIG_X86
-static const struct x86_cpu_id ccp_support[] = {
- { X86_VENDOR_AMD, 22, },
- { },
-};
-#endif
-
static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
- struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
int ret;
- if (!x86_match_cpu(ccp_support))
- return -ENODEV;
-
- switch (cpuinfo->x86) {
- case 22:
- if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
- return -ENODEV;
-
- ret = ccp_pci_init();
- if (ret)
- return ret;
-
- /* Don't leave the driver loaded if init failed */
- if (!ccp_get_device()) {
- ccp_pci_exit();
- return -ENODEV;
- }
-
- return 0;
+ ret = ccp_pci_init();
+ if (ret)
+ return ret;
- break;
+ /* Don't leave the driver loaded if init failed */
+ if (ccp_present() != 0) {
+ ccp_pci_exit();
+ return -ENODEV;
}
+
+ return 0;
#endif
#ifdef CONFIG_ARM64
@@ -622,7 +453,7 @@ static int __init ccp_mod_init(void)
return ret;
/* Don't leave the driver loaded if init failed */
- if (!ccp_get_device()) {
+ if (ccp_present() != 0) {
ccp_platform_exit();
return -ENODEV;
}
@@ -636,13 +467,7 @@ static int __init ccp_mod_init(void)
static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
- struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
-
- switch (cpuinfo->x86) {
- case 22:
- ccp_pci_exit();
- break;
- }
+ ccp_pci_exit();
#endif
#ifdef CONFIG_ARM64