Diffstat (limited to 'net/iucv')
-rw-r--r-- | net/iucv/Kconfig   |   17
-rw-r--r-- | net/iucv/Makefile  |    6
-rw-r--r-- | net/iucv/af_iucv.c | 2463
-rw-r--r-- | net/iucv/iucv.c    | 2119
4 files changed, 4605 insertions, 0 deletions
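
Before the patch body: a minimal userspace sketch of how an application might talk to the AF_IUCV socket family that net/iucv/af_iucv.c below implements. This is illustrative only, not part of the patch; the struct sockaddr_iucv layout is mirrored here from include/net/iucv/af_iucv.h (which is not in this diff), and the peer user id "PEERGST" and application name "APPSRV" are hypothetical placeholders.

/* Illustrative sketch: blocking AF_IUCV client over the z/VM transport.
 * Assumes the sockaddr_iucv layout from include/net/iucv/af_iucv.h;
 * peer user id and application name are hypothetical placeholders. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_IUCV
#define AF_IUCV 32                      /* matches PF_IUCV in the kernel */
#endif

struct sockaddr_iucv {                  /* mirrored from the kernel header */
	sa_family_t    siucv_family;
	unsigned short siucv_port;       /* reserved */
	unsigned int   siucv_addr;       /* reserved */
	char           siucv_nodeid[8];  /* reserved */
	char           siucv_user_id[8]; /* z/VM guest user id, blank padded */
	char           siucv_name[8];    /* application name, blank padded */
};

int main(void)
{
	struct sockaddr_iucv peer;
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket(AF_IUCV)");   /* fails unless af_iucv is available */
		return 1;
	}

	memset(&peer, 0, sizeof(peer));
	peer.siucv_family = AF_IUCV;
	memcpy(peer.siucv_user_id, "PEERGST ", 8);  /* hypothetical peer VM guest */
	memcpy(peer.siucv_name,    "APPSRV  ", 8);  /* hypothetical service name */

	/* Enters iucv_sock_connect(): autobind, then IUCV path setup. */
	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}

	write(fd, "hello", 5);              /* handled by iucv_sock_sendmsg() */
	close(fd);                          /* iucv_sock_release() severs the path */
	return 0;
}

An explicit bind() with a filled-in siucv_name before connect() would go through iucv_sock_bind() in the patch; the HiperSockets transport requires such an explicit bind, whereas the z/VM transport can autobind as above.
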
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig new file mode 100644 index 000000000..497fbe732 --- /dev/null +++ b/net/iucv/Kconfig @@ -0,0 +1,17 @@ +config IUCV + depends on S390 + def_tristate y if S390 + prompt "IUCV support (S390 - z/VM only)" + help + Select this option if you want to use inter-user communication + under VM or VIF. If you run on z/VM, say "Y" to enable a fast + communication link between VM guests. + +config AFIUCV + depends on S390 + def_tristate m if QETH_L3 || IUCV + prompt "AF_IUCV Socket support (S390 - z/VM and HiperSockets transport)" + help + Select this option if you want to use AF_IUCV socket applications + based on z/VM inter-user communication vehicle or based on + HiperSockets. diff --git a/net/iucv/Makefile b/net/iucv/Makefile new file mode 100644 index 000000000..7bfdc8532 --- /dev/null +++ b/net/iucv/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for IUCV +# + +obj-$(CONFIG_IUCV) += iucv.o +obj-$(CONFIG_AFIUCV) += af_iucv.o diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c new file mode 100644 index 000000000..6daa52a18 --- /dev/null +++ b/net/iucv/af_iucv.c @@ -0,0 +1,2463 @@ +/* + * IUCV protocol stack for Linux on zSeries + * + * Copyright IBM Corp. 2006, 2009 + * + * Author(s): Jennifer Hunt <jenhunt@us.ibm.com> + * Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + * PM functions: + * Ursula Braun <ursula.braun@de.ibm.com> + */ + +#define KMSG_COMPONENT "af_iucv" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/skbuff.h> +#include <linux/init.h> +#include <linux/poll.h> +#include <net/sock.h> +#include <asm/ebcdic.h> +#include <asm/cpcmd.h> +#include <linux/kmod.h> + +#include <net/iucv/af_iucv.h> + +#define VERSION "1.2" + +static char iucv_userid[80]; + +static const struct proto_ops iucv_sock_ops; + +static struct proto iucv_proto = { + .name = "AF_IUCV", + .owner = THIS_MODULE, + .obj_size = sizeof(struct iucv_sock), +}; + +static struct iucv_interface *pr_iucv; + +/* special AF_IUCV IPRM messages */ +static const u8 iprm_shutdown[8] = + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; + +#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) + +#define __iucv_sock_wait(sk, condition, timeo, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + long __timeo = timeo; \ + ret = 0; \ + prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \ + while (!(condition)) { \ + if (!__timeo) { \ + ret = -EAGAIN; \ + break; \ + } \ + if (signal_pending(current)) { \ + ret = sock_intr_errno(__timeo); \ + break; \ + } \ + release_sock(sk); \ + __timeo = schedule_timeout(__timeo); \ + lock_sock(sk); \ + ret = sock_error(sk); \ + if (ret) \ + break; \ + } \ + finish_wait(sk_sleep(sk), &__wait); \ +} while (0) + +#define iucv_sock_wait(sk, condition, timeo) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __iucv_sock_wait(sk, condition, timeo, __ret); \ + __ret; \ +}) + +static void iucv_sock_kill(struct sock *sk); +static void iucv_sock_close(struct sock *sk); +static void iucv_sever_path(struct sock *, int); + +static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev); +static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, + struct sk_buff *skb, u8 flags); +static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify); + +/* Call Back functions */ +static void 
iucv_callback_rx(struct iucv_path *, struct iucv_message *); +static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *); +static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]); +static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], + u8 ipuser[16]); +static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); +static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]); + +static struct iucv_sock_list iucv_sk_list = { + .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock), + .autobind_name = ATOMIC_INIT(0) +}; + +static struct iucv_handler af_iucv_handler = { + .path_pending = iucv_callback_connreq, + .path_complete = iucv_callback_connack, + .path_severed = iucv_callback_connrej, + .message_pending = iucv_callback_rx, + .message_complete = iucv_callback_txdone, + .path_quiesced = iucv_callback_shutdown, +}; + +static inline void high_nmcpy(unsigned char *dst, char *src) +{ + memcpy(dst, src, 8); +} + +static inline void low_nmcpy(unsigned char *dst, char *src) +{ + memcpy(&dst[8], src, 8); +} + +static int afiucv_pm_prepare(struct device *dev) +{ +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "afiucv_pm_prepare\n"); +#endif + return 0; +} + +static void afiucv_pm_complete(struct device *dev) +{ +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "afiucv_pm_complete\n"); +#endif +} + +/** + * afiucv_pm_freeze() - Freeze PM callback + * @dev: AFIUCV dummy device + * + * Sever all established IUCV communication pathes + */ +static int afiucv_pm_freeze(struct device *dev) +{ + struct iucv_sock *iucv; + struct sock *sk; + int err = 0; + +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "afiucv_pm_freeze\n"); +#endif + read_lock(&iucv_sk_list.lock); + sk_for_each(sk, &iucv_sk_list.head) { + iucv = iucv_sk(sk); + switch (sk->sk_state) { + case IUCV_DISCONN: + case IUCV_CLOSING: + case IUCV_CONNECTED: + iucv_sever_path(sk, 0); + break; + case IUCV_OPEN: + case IUCV_BOUND: + case IUCV_LISTEN: + case IUCV_CLOSED: + default: + break; + } + skb_queue_purge(&iucv->send_skb_q); + skb_queue_purge(&iucv->backlog_skb_q); + } + read_unlock(&iucv_sk_list.lock); + return err; +} + +/** + * afiucv_pm_restore_thaw() - Thaw and restore PM callback + * @dev: AFIUCV dummy device + * + * socket clean up after freeze + */ +static int afiucv_pm_restore_thaw(struct device *dev) +{ + struct sock *sk; + +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "afiucv_pm_restore_thaw\n"); +#endif + read_lock(&iucv_sk_list.lock); + sk_for_each(sk, &iucv_sk_list.head) { + switch (sk->sk_state) { + case IUCV_CONNECTED: + sk->sk_err = EPIPE; + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + break; + case IUCV_DISCONN: + case IUCV_CLOSING: + case IUCV_LISTEN: + case IUCV_BOUND: + case IUCV_OPEN: + default: + break; + } + } + read_unlock(&iucv_sk_list.lock); + return 0; +} + +static const struct dev_pm_ops afiucv_pm_ops = { + .prepare = afiucv_pm_prepare, + .complete = afiucv_pm_complete, + .freeze = afiucv_pm_freeze, + .thaw = afiucv_pm_restore_thaw, + .restore = afiucv_pm_restore_thaw, +}; + +static struct device_driver af_iucv_driver = { + .owner = THIS_MODULE, + .name = "afiucv", + .bus = NULL, + .pm = &afiucv_pm_ops, +}; + +/* dummy device used as trigger for PM functions */ +static struct device *af_iucv_dev; + +/** + * iucv_msg_length() - Returns the length of an iucv message. 
+ * @msg: Pointer to struct iucv_message, MUST NOT be NULL + * + * The function returns the length of the specified iucv message @msg of data + * stored in a buffer and of data stored in the parameter list (PRMDATA). + * + * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket + * data: + * PRMDATA[0..6] socket data (max 7 bytes); + * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7]) + * + * The socket data length is computed by subtracting the socket data length + * value from 0xFF. + * If the socket data len is greater 7, then PRMDATA can be used for special + * notifications (see iucv_sock_shutdown); and further, + * if the socket data len is > 7, the function returns 8. + * + * Use this function to allocate socket buffers to store iucv message data. + */ +static inline size_t iucv_msg_length(struct iucv_message *msg) +{ + size_t datalen; + + if (msg->flags & IUCV_IPRMDATA) { + datalen = 0xff - msg->rmmsg[7]; + return (datalen < 8) ? datalen : 8; + } + return msg->length; +} + +/** + * iucv_sock_in_state() - check for specific states + * @sk: sock structure + * @state: first iucv sk state + * @state: second iucv sk state + * + * Returns true if the socket in either in the first or second state. + */ +static int iucv_sock_in_state(struct sock *sk, int state, int state2) +{ + return (sk->sk_state == state || sk->sk_state == state2); +} + +/** + * iucv_below_msglim() - function to check if messages can be sent + * @sk: sock structure + * + * Returns true if the send queue length is lower than the message limit. + * Always returns true if the socket is not connected (no iucv path for + * checking the message limit). + */ +static inline int iucv_below_msglim(struct sock *sk) +{ + struct iucv_sock *iucv = iucv_sk(sk); + + if (sk->sk_state != IUCV_CONNECTED) + return 1; + if (iucv->transport == AF_IUCV_TRANS_IUCV) + return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); + else + return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) && + (atomic_read(&iucv->pendings) <= 0)); +} + +/** + * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit + */ +static void iucv_sock_wake_msglim(struct sock *sk) +{ + struct socket_wq *wq; + + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + if (wq_has_sleeper(wq)) + wake_up_interruptible_all(&wq->wait); + sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); + rcu_read_unlock(); +} + +/** + * afiucv_hs_send() - send a message through HiperSockets transport + */ +static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, + struct sk_buff *skb, u8 flags) +{ + struct iucv_sock *iucv = iucv_sk(sock); + struct af_iucv_trans_hdr *phs_hdr; + struct sk_buff *nskb; + int err, confirm_recv = 0; + + memset(skb->head, 0, ETH_HLEN); + phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb, + sizeof(struct af_iucv_trans_hdr)); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_push(skb, ETH_HLEN); + skb_reset_mac_header(skb); + memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr)); + + phs_hdr->magic = ETH_P_AF_IUCV; + phs_hdr->version = 1; + phs_hdr->flags = flags; + if (flags == AF_IUCV_FLAG_SYN) + phs_hdr->window = iucv->msglimit; + else if ((flags == AF_IUCV_FLAG_WIN) || !flags) { + confirm_recv = atomic_read(&iucv->msg_recv); + phs_hdr->window = confirm_recv; + if (confirm_recv) + phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN; + } + memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8); + memcpy(phs_hdr->destAppName, iucv->dst_name, 8); + memcpy(phs_hdr->srcUserID, iucv->src_user_id, 
8); + memcpy(phs_hdr->srcAppName, iucv->src_name, 8); + ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID)); + ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName)); + ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID)); + ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName)); + if (imsg) + memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); + + skb->dev = iucv->hs_dev; + if (!skb->dev) + return -ENODEV; + if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) + return -ENETDOWN; + if (skb->len > skb->dev->mtu) { + if (sock->sk_type == SOCK_SEQPACKET) + return -EMSGSIZE; + else + skb_trim(skb, skb->dev->mtu); + } + skb->protocol = ETH_P_AF_IUCV; + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + skb_queue_tail(&iucv->send_skb_q, nskb); + err = dev_queue_xmit(skb); + if (net_xmit_eval(err)) { + skb_unlink(nskb, &iucv->send_skb_q); + kfree_skb(nskb); + } else { + atomic_sub(confirm_recv, &iucv->msg_recv); + WARN_ON(atomic_read(&iucv->msg_recv) < 0); + } + return net_xmit_eval(err); +} + +static struct sock *__iucv_get_sock_by_name(char *nm) +{ + struct sock *sk; + + sk_for_each(sk, &iucv_sk_list.head) + if (!memcmp(&iucv_sk(sk)->src_name, nm, 8)) + return sk; + + return NULL; +} + +static void iucv_sock_destruct(struct sock *sk) +{ + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_error_queue); + + sk_mem_reclaim(sk); + + if (!sock_flag(sk, SOCK_DEAD)) { + pr_err("Attempt to release alive iucv socket %p\n", sk); + return; + } + + WARN_ON(atomic_read(&sk->sk_rmem_alloc)); + WARN_ON(atomic_read(&sk->sk_wmem_alloc)); + WARN_ON(sk->sk_wmem_queued); + WARN_ON(sk->sk_forward_alloc); +} + +/* Cleanup Listen */ +static void iucv_sock_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + /* Close non-accepted connections */ + while ((sk = iucv_accept_dequeue(parent, NULL))) { + iucv_sock_close(sk); + iucv_sock_kill(sk); + } + + parent->sk_state = IUCV_CLOSED; +} + +/* Kill socket (only if zapped and orphaned) */ +static void iucv_sock_kill(struct sock *sk) +{ + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + return; + + iucv_sock_unlink(&iucv_sk_list, sk); + sock_set_flag(sk, SOCK_DEAD); + sock_put(sk); +} + +/* Terminate an IUCV path */ +static void iucv_sever_path(struct sock *sk, int with_user_data) +{ + unsigned char user_data[16]; + struct iucv_sock *iucv = iucv_sk(sk); + struct iucv_path *path = iucv->path; + + if (iucv->path) { + iucv->path = NULL; + if (with_user_data) { + low_nmcpy(user_data, iucv->src_name); + high_nmcpy(user_data, iucv->dst_name); + ASCEBC(user_data, sizeof(user_data)); + pr_iucv->path_sever(path, user_data); + } else + pr_iucv->path_sever(path, NULL); + iucv_path_free(path); + } +} + +/* Send FIN through an IUCV socket for HIPER transport */ +static int iucv_send_ctrl(struct sock *sk, u8 flags) +{ + int err = 0; + int blen; + struct sk_buff *skb; + + blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN; + skb = sock_alloc_send_skb(sk, blen, 1, &err); + if (skb) { + skb_reserve(skb, blen); + err = afiucv_hs_send(NULL, sk, skb, flags); + } + return err; +} + +/* Close an IUCV socket */ +static void iucv_sock_close(struct sock *sk) +{ + struct iucv_sock *iucv = iucv_sk(sk); + unsigned long timeo; + int err = 0; + + lock_sock(sk); + + switch (sk->sk_state) { + case IUCV_LISTEN: + iucv_sock_cleanup_listen(sk); + break; + + case IUCV_CONNECTED: + if (iucv->transport == AF_IUCV_TRANS_HIPER) { + err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + 
} + case IUCV_DISCONN: /* fall through */ + sk->sk_state = IUCV_CLOSING; + sk->sk_state_change(sk); + + if (!err && !skb_queue_empty(&iucv->send_skb_q)) { + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) + timeo = sk->sk_lingertime; + else + timeo = IUCV_DISCONN_TIMEOUT; + iucv_sock_wait(sk, + iucv_sock_in_state(sk, IUCV_CLOSED, 0), + timeo); + } + + case IUCV_CLOSING: /* fall through */ + sk->sk_state = IUCV_CLOSED; + sk->sk_state_change(sk); + + sk->sk_err = ECONNRESET; + sk->sk_state_change(sk); + + skb_queue_purge(&iucv->send_skb_q); + skb_queue_purge(&iucv->backlog_skb_q); + + default: /* fall through */ + iucv_sever_path(sk, 1); + } + + if (iucv->hs_dev) { + dev_put(iucv->hs_dev); + iucv->hs_dev = NULL; + sk->sk_bound_dev_if = 0; + } + + /* mark socket for deletion by iucv_sock_kill() */ + sock_set_flag(sk, SOCK_ZAPPED); + + release_sock(sk); +} + +static void iucv_sock_init(struct sock *sk, struct sock *parent) +{ + if (parent) + sk->sk_type = parent->sk_type; +} + +static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) +{ + struct sock *sk; + struct iucv_sock *iucv; + + sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); + if (!sk) + return NULL; + iucv = iucv_sk(sk); + + sock_init_data(sock, sk); + INIT_LIST_HEAD(&iucv->accept_q); + spin_lock_init(&iucv->accept_q_lock); + skb_queue_head_init(&iucv->send_skb_q); + INIT_LIST_HEAD(&iucv->message_q.list); + spin_lock_init(&iucv->message_q.lock); + skb_queue_head_init(&iucv->backlog_skb_q); + iucv->send_tag = 0; + atomic_set(&iucv->pendings, 0); + iucv->flags = 0; + iucv->msglimit = 0; + atomic_set(&iucv->msg_sent, 0); + atomic_set(&iucv->msg_recv, 0); + iucv->path = NULL; + iucv->sk_txnotify = afiucv_hs_callback_txnotify; + memset(&iucv->src_user_id , 0, 32); + if (pr_iucv) + iucv->transport = AF_IUCV_TRANS_IUCV; + else + iucv->transport = AF_IUCV_TRANS_HIPER; + + sk->sk_destruct = iucv_sock_destruct; + sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; + sk->sk_allocation = GFP_DMA; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = proto; + sk->sk_state = IUCV_OPEN; + + iucv_sock_link(&iucv_sk_list, sk); + return sk; +} + +/* Create an IUCV socket */ +static int iucv_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + if (protocol && protocol != PF_IUCV) + return -EPROTONOSUPPORT; + + sock->state = SS_UNCONNECTED; + + switch (sock->type) { + case SOCK_STREAM: + sock->ops = &iucv_sock_ops; + break; + case SOCK_SEQPACKET: + /* currently, proto ops can handle both sk types */ + sock->ops = &iucv_sock_ops; + break; + default: + return -ESOCKTNOSUPPORT; + } + + sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL); + if (!sk) + return -ENOMEM; + + iucv_sock_init(sk, NULL); + + return 0; +} + +void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) +{ + write_lock_bh(&l->lock); + sk_add_node(sk, &l->head); + write_unlock_bh(&l->lock); +} + +void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) +{ + write_lock_bh(&l->lock); + sk_del_node_init(sk); + write_unlock_bh(&l->lock); +} + +void iucv_accept_enqueue(struct sock *parent, struct sock *sk) +{ + unsigned long flags; + struct iucv_sock *par = iucv_sk(parent); + + sock_hold(sk); + spin_lock_irqsave(&par->accept_q_lock, flags); + list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); + spin_unlock_irqrestore(&par->accept_q_lock, flags); + iucv_sk(sk)->parent = parent; + sk_acceptq_added(parent); +} + +void iucv_accept_unlink(struct sock *sk) +{ + unsigned long flags; + struct iucv_sock *par = 
iucv_sk(iucv_sk(sk)->parent); + + spin_lock_irqsave(&par->accept_q_lock, flags); + list_del_init(&iucv_sk(sk)->accept_q); + spin_unlock_irqrestore(&par->accept_q_lock, flags); + sk_acceptq_removed(iucv_sk(sk)->parent); + iucv_sk(sk)->parent = NULL; + sock_put(sk); +} + +struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock) +{ + struct iucv_sock *isk, *n; + struct sock *sk; + + list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { + sk = (struct sock *) isk; + lock_sock(sk); + + if (sk->sk_state == IUCV_CLOSED) { + iucv_accept_unlink(sk); + release_sock(sk); + continue; + } + + if (sk->sk_state == IUCV_CONNECTED || + sk->sk_state == IUCV_DISCONN || + !newsock) { + iucv_accept_unlink(sk); + if (newsock) + sock_graft(sk, newsock); + + release_sock(sk); + return sk; + } + + release_sock(sk); + } + return NULL; +} + +static void __iucv_auto_name(struct iucv_sock *iucv) +{ + char name[12]; + + sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); + while (__iucv_get_sock_by_name(name)) { + sprintf(name, "%08x", + atomic_inc_return(&iucv_sk_list.autobind_name)); + } + memcpy(iucv->src_name, name, 8); +} + +/* Bind an unbound socket */ +static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, + int addr_len) +{ + struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; + struct sock *sk = sock->sk; + struct iucv_sock *iucv; + int err = 0; + struct net_device *dev; + char uid[9]; + + /* Verify the input sockaddr */ + if (!addr || addr->sa_family != AF_IUCV) + return -EINVAL; + + lock_sock(sk); + if (sk->sk_state != IUCV_OPEN) { + err = -EBADFD; + goto done; + } + + write_lock_bh(&iucv_sk_list.lock); + + iucv = iucv_sk(sk); + if (__iucv_get_sock_by_name(sa->siucv_name)) { + err = -EADDRINUSE; + goto done_unlock; + } + if (iucv->path) + goto done_unlock; + + /* Bind the socket */ + if (pr_iucv) + if (!memcmp(sa->siucv_user_id, iucv_userid, 8)) + goto vm_bind; /* VM IUCV transport */ + + /* try hiper transport */ + memcpy(uid, sa->siucv_user_id, sizeof(uid)); + ASCEBC(uid, 8); + rcu_read_lock(); + for_each_netdev_rcu(&init_net, dev) { + if (!memcmp(dev->perm_addr, uid, 8)) { + memcpy(iucv->src_user_id, sa->siucv_user_id, 8); + /* Check for unitialized siucv_name */ + if (strncmp(sa->siucv_name, " ", 8) == 0) + __iucv_auto_name(iucv); + else + memcpy(iucv->src_name, sa->siucv_name, 8); + sk->sk_bound_dev_if = dev->ifindex; + iucv->hs_dev = dev; + dev_hold(dev); + sk->sk_state = IUCV_BOUND; + iucv->transport = AF_IUCV_TRANS_HIPER; + if (!iucv->msglimit) + iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT; + rcu_read_unlock(); + goto done_unlock; + } + } + rcu_read_unlock(); +vm_bind: + if (pr_iucv) { + /* use local userid for backward compat */ + memcpy(iucv->src_name, sa->siucv_name, 8); + memcpy(iucv->src_user_id, iucv_userid, 8); + sk->sk_state = IUCV_BOUND; + iucv->transport = AF_IUCV_TRANS_IUCV; + if (!iucv->msglimit) + iucv->msglimit = IUCV_QUEUELEN_DEFAULT; + goto done_unlock; + } + /* found no dev to bind */ + err = -ENODEV; +done_unlock: + /* Release the socket list lock */ + write_unlock_bh(&iucv_sk_list.lock); +done: + release_sock(sk); + return err; +} + +/* Automatically bind an unbound socket */ +static int iucv_sock_autobind(struct sock *sk) +{ + struct iucv_sock *iucv = iucv_sk(sk); + int err = 0; + + if (unlikely(!pr_iucv)) + return -EPROTO; + + memcpy(iucv->src_user_id, iucv_userid, 8); + + write_lock_bh(&iucv_sk_list.lock); + __iucv_auto_name(iucv); + write_unlock_bh(&iucv_sk_list.lock); + + if (!iucv->msglimit) + 
iucv->msglimit = IUCV_QUEUELEN_DEFAULT; + + return err; +} + +static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr) +{ + struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; + struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); + unsigned char user_data[16]; + int err; + + high_nmcpy(user_data, sa->siucv_name); + low_nmcpy(user_data, iucv->src_name); + ASCEBC(user_data, sizeof(user_data)); + + /* Create path. */ + iucv->path = iucv_path_alloc(iucv->msglimit, + IUCV_IPRMDATA, GFP_KERNEL); + if (!iucv->path) { + err = -ENOMEM; + goto done; + } + err = pr_iucv->path_connect(iucv->path, &af_iucv_handler, + sa->siucv_user_id, NULL, user_data, + sk); + if (err) { + iucv_path_free(iucv->path); + iucv->path = NULL; + switch (err) { + case 0x0b: /* Target communicator is not logged on */ + err = -ENETUNREACH; + break; + case 0x0d: /* Max connections for this guest exceeded */ + case 0x0e: /* Max connections for target guest exceeded */ + err = -EAGAIN; + break; + case 0x0f: /* Missing IUCV authorization */ + err = -EACCES; + break; + default: + err = -ECONNREFUSED; + break; + } + } +done: + return err; +} + +/* Connect an unconnected socket */ +static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, + int alen, int flags) +{ + struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; + struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); + int err; + + if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv)) + return -EINVAL; + + if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND) + return -EBADFD; + + if (sk->sk_state == IUCV_OPEN && + iucv->transport == AF_IUCV_TRANS_HIPER) + return -EBADFD; /* explicit bind required */ + + if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET) + return -EINVAL; + + if (sk->sk_state == IUCV_OPEN) { + err = iucv_sock_autobind(sk); + if (unlikely(err)) + return err; + } + + lock_sock(sk); + + /* Set the destination information */ + memcpy(iucv->dst_user_id, sa->siucv_user_id, 8); + memcpy(iucv->dst_name, sa->siucv_name, 8); + + if (iucv->transport == AF_IUCV_TRANS_HIPER) + err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN); + else + err = afiucv_path_connect(sock, addr); + if (err) + goto done; + + if (sk->sk_state != IUCV_CONNECTED) + err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, + IUCV_DISCONN), + sock_sndtimeo(sk, flags & O_NONBLOCK)); + + if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED) + err = -ECONNREFUSED; + + if (err && iucv->transport == AF_IUCV_TRANS_IUCV) + iucv_sever_path(sk, 0); + +done: + release_sock(sk); + return err; +} + +/* Move a socket into listening state. 
*/ +static int iucv_sock_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + int err; + + lock_sock(sk); + + err = -EINVAL; + if (sk->sk_state != IUCV_BOUND) + goto done; + + if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) + goto done; + + sk->sk_max_ack_backlog = backlog; + sk->sk_ack_backlog = 0; + sk->sk_state = IUCV_LISTEN; + err = 0; + +done: + release_sock(sk); + return err; +} + +/* Accept a pending connection */ +static int iucv_sock_accept(struct socket *sock, struct socket *newsock, + int flags) +{ + DECLARE_WAITQUEUE(wait, current); + struct sock *sk = sock->sk, *nsk; + long timeo; + int err = 0; + + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + + if (sk->sk_state != IUCV_LISTEN) { + err = -EBADFD; + goto done; + } + + timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + + /* Wait for an incoming connection */ + add_wait_queue_exclusive(sk_sleep(sk), &wait); + while (!(nsk = iucv_accept_dequeue(sk, newsock))) { + set_current_state(TASK_INTERRUPTIBLE); + if (!timeo) { + err = -EAGAIN; + break; + } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + + if (sk->sk_state != IUCV_LISTEN) { + err = -EBADFD; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + + if (err) + goto done; + + newsock->state = SS_CONNECTED; + +done: + release_sock(sk); + return err; +} + +static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr, + int *len, int peer) +{ + struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; + struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); + + addr->sa_family = AF_IUCV; + *len = sizeof(struct sockaddr_iucv); + + if (peer) { + memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8); + memcpy(siucv->siucv_name, iucv->dst_name, 8); + } else { + memcpy(siucv->siucv_user_id, iucv->src_user_id, 8); + memcpy(siucv->siucv_name, iucv->src_name, 8); + } + memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); + memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); + memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); + + return 0; +} + +/** + * iucv_send_iprm() - Send socket data in parameter list of an iucv message. + * @path: IUCV path + * @msg: Pointer to a struct iucv_message + * @skb: The socket data to send, skb->len MUST BE <= 7 + * + * Send the socket data in the parameter list in the iucv message + * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter + * list and the socket data len at index 7 (last byte). + * See also iucv_msg_length(). + * + * Returns the error code from the iucv_message_send() call. 
+ */ +static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg, + struct sk_buff *skb) +{ + u8 prmdata[8]; + + memcpy(prmdata, (void *) skb->data, skb->len); + prmdata[7] = 0xff - (u8) skb->len; + return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0, + (void *) prmdata, 8); +} + +static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); + struct sk_buff *skb; + struct iucv_message txmsg; + struct cmsghdr *cmsg; + int cmsg_done; + long timeo; + char user_id[9]; + char appl_id[9]; + int err; + int noblock = msg->msg_flags & MSG_DONTWAIT; + + err = sock_error(sk); + if (err) + return err; + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + /* SOCK_SEQPACKET: we do not support segmented records */ + if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) + return -EOPNOTSUPP; + + lock_sock(sk); + + if (sk->sk_shutdown & SEND_SHUTDOWN) { + err = -EPIPE; + goto out; + } + + /* Return if the socket is not in connected state */ + if (sk->sk_state != IUCV_CONNECTED) { + err = -ENOTCONN; + goto out; + } + + /* initialize defaults */ + cmsg_done = 0; /* check for duplicate headers */ + txmsg.class = 0; + + /* iterate over control messages */ + for_each_cmsghdr(cmsg, msg) { + if (!CMSG_OK(msg, cmsg)) { + err = -EINVAL; + goto out; + } + + if (cmsg->cmsg_level != SOL_IUCV) + continue; + + if (cmsg->cmsg_type & cmsg_done) { + err = -EINVAL; + goto out; + } + cmsg_done |= cmsg->cmsg_type; + + switch (cmsg->cmsg_type) { + case SCM_IUCV_TRGCLS: + if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) { + err = -EINVAL; + goto out; + } + + /* set iucv message target class */ + memcpy(&txmsg.class, + (void *) CMSG_DATA(cmsg), TRGCLS_SIZE); + + break; + + default: + err = -EINVAL; + goto out; + } + } + + /* allocate one skb for each iucv message: + * this is fine for SOCK_SEQPACKET (unless we want to support + * segmented records using the MSG_EOR flag), but + * for SOCK_STREAM we might want to improve it in future */ + if (iucv->transport == AF_IUCV_TRANS_HIPER) + skb = sock_alloc_send_skb(sk, + len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN, + noblock, &err); + else + skb = sock_alloc_send_skb(sk, len, noblock, &err); + if (!skb) + goto out; + if (iucv->transport == AF_IUCV_TRANS_HIPER) + skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); + if (memcpy_from_msg(skb_put(skb, len), msg, len)) { + err = -EFAULT; + goto fail; + } + + /* wait if outstanding messages for iucv path has reached */ + timeo = sock_sndtimeo(sk, noblock); + err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); + if (err) + goto fail; + + /* return -ECONNRESET if the socket is no longer connected */ + if (sk->sk_state != IUCV_CONNECTED) { + err = -ECONNRESET; + goto fail; + } + + /* increment and save iucv message tag for msg_completion cbk */ + txmsg.tag = iucv->send_tag++; + IUCV_SKB_CB(skb)->tag = txmsg.tag; + + if (iucv->transport == AF_IUCV_TRANS_HIPER) { + atomic_inc(&iucv->msg_sent); + err = afiucv_hs_send(&txmsg, sk, skb, 0); + if (err) { + atomic_dec(&iucv->msg_sent); + goto fail; + } + goto release; + } + skb_queue_tail(&iucv->send_skb_q, skb); + + if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) + && skb->len <= 7) { + err = iucv_send_iprm(iucv->path, &txmsg, skb); + + /* on success: there is no message_complete callback + * for an IPRMDATA msg; remove skb from send queue */ + if (err == 0) { + skb_unlink(skb, &iucv->send_skb_q); + kfree_skb(skb); + } + + /* this error should 
never happen since the + * IUCV_IPRMDATA path flag is set... sever path */ + if (err == 0x15) { + pr_iucv->path_sever(iucv->path, NULL); + skb_unlink(skb, &iucv->send_skb_q); + err = -EPIPE; + goto fail; + } + } else + err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0, + (void *) skb->data, skb->len); + if (err) { + if (err == 3) { + user_id[8] = 0; + memcpy(user_id, iucv->dst_user_id, 8); + appl_id[8] = 0; + memcpy(appl_id, iucv->dst_name, 8); + pr_err("Application %s on z/VM guest %s" + " exceeds message limit\n", + appl_id, user_id); + err = -EAGAIN; + } else + err = -EPIPE; + skb_unlink(skb, &iucv->send_skb_q); + goto fail; + } + +release: + release_sock(sk); + return len; + +fail: + kfree_skb(skb); +out: + release_sock(sk); + return err; +} + +/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's + * + * Locking: must be called with message_q.lock held + */ +static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) +{ + int dataleft, size, copied = 0; + struct sk_buff *nskb; + + dataleft = len; + while (dataleft) { + if (dataleft >= sk->sk_rcvbuf / 4) + size = sk->sk_rcvbuf / 4; + else + size = dataleft; + + nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA); + if (!nskb) + return -ENOMEM; + + /* copy target class to control buffer of new skb */ + IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class; + + /* copy data fragment */ + memcpy(nskb->data, skb->data + copied, size); + copied += size; + dataleft -= size; + + skb_reset_transport_header(nskb); + skb_reset_network_header(nskb); + nskb->len = size; + + skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb); + } + + return 0; +} + +/* iucv_process_message() - Receive a single outstanding IUCV message + * + * Locking: must be called with message_q.lock held + */ +static void iucv_process_message(struct sock *sk, struct sk_buff *skb, + struct iucv_path *path, + struct iucv_message *msg) +{ + int rc; + unsigned int len; + + len = iucv_msg_length(msg); + + /* store msg target class in the second 4 bytes of skb ctrl buffer */ + /* Note: the first 4 bytes are reserved for msg tag */ + IUCV_SKB_CB(skb)->class = msg->class; + + /* check for special IPRM messages (e.g. 
iucv_sock_shutdown) */ + if ((msg->flags & IUCV_IPRMDATA) && len > 7) { + if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) { + skb->data = NULL; + skb->len = 0; + } + } else { + rc = pr_iucv->message_receive(path, msg, + msg->flags & IUCV_IPRMDATA, + skb->data, len, NULL); + if (rc) { + kfree_skb(skb); + return; + } + /* we need to fragment iucv messages for SOCK_STREAM only; + * for SOCK_SEQPACKET, it is only relevant if we support + * record segmentation using MSG_EOR (see also recvmsg()) */ + if (sk->sk_type == SOCK_STREAM && + skb->truesize >= sk->sk_rcvbuf / 4) { + rc = iucv_fragment_skb(sk, skb, len); + kfree_skb(skb); + skb = NULL; + if (rc) { + pr_iucv->path_sever(path, NULL); + return; + } + skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); + } else { + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + skb->len = len; + } + } + + IUCV_SKB_CB(skb)->offset = 0; + if (sock_queue_rcv_skb(sk, skb)) + skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); +} + +/* iucv_process_message_q() - Process outstanding IUCV messages + * + * Locking: must be called with message_q.lock held + */ +static void iucv_process_message_q(struct sock *sk) +{ + struct iucv_sock *iucv = iucv_sk(sk); + struct sk_buff *skb; + struct sock_msg_q *p, *n; + + list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { + skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA); + if (!skb) + break; + iucv_process_message(sk, skb, p->path, &p->msg); + list_del(&p->list); + kfree(p); + if (!skb_queue_empty(&iucv->backlog_skb_q)) + break; + } +} + +static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) +{ + int noblock = flags & MSG_DONTWAIT; + struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); + unsigned int copied, rlen; + struct sk_buff *skb, *rskb, *cskb; + int err = 0; + u32 offset; + + if ((sk->sk_state == IUCV_DISCONN) && + skb_queue_empty(&iucv->backlog_skb_q) && + skb_queue_empty(&sk->sk_receive_queue) && + list_empty(&iucv->message_q.list)) + return 0; + + if (flags & (MSG_OOB)) + return -EOPNOTSUPP; + + /* receive/dequeue next skb: + * the function understands MSG_PEEK and, thus, does not dequeue skb */ + skb = skb_recv_datagram(sk, flags, noblock, &err); + if (!skb) { + if (sk->sk_shutdown & RCV_SHUTDOWN) + return 0; + return err; + } + + offset = IUCV_SKB_CB(skb)->offset; + rlen = skb->len - offset; /* real length of skb */ + copied = min_t(unsigned int, rlen, len); + if (!rlen) + sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; + + cskb = skb; + if (skb_copy_datagram_msg(cskb, offset, msg, copied)) { + if (!(flags & MSG_PEEK)) + skb_queue_head(&sk->sk_receive_queue, skb); + return -EFAULT; + } + + /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */ + if (sk->sk_type == SOCK_SEQPACKET) { + if (copied < rlen) + msg->msg_flags |= MSG_TRUNC; + /* each iucv message contains a complete record */ + msg->msg_flags |= MSG_EOR; + } + + /* create control message to store iucv msg target class: + * get the trgcls from the control buffer of the skb due to + * fragmentation of original iucv message. 
*/ + err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, + sizeof(IUCV_SKB_CB(skb)->class), + (void *)&IUCV_SKB_CB(skb)->class); + if (err) { + if (!(flags & MSG_PEEK)) + skb_queue_head(&sk->sk_receive_queue, skb); + return err; + } + + /* Mark read part of skb as used */ + if (!(flags & MSG_PEEK)) { + + /* SOCK_STREAM: re-queue skb if it contains unreceived data */ + if (sk->sk_type == SOCK_STREAM) { + if (copied < rlen) { + IUCV_SKB_CB(skb)->offset = offset + copied; + skb_queue_head(&sk->sk_receive_queue, skb); + goto done; + } + } + + kfree_skb(skb); + if (iucv->transport == AF_IUCV_TRANS_HIPER) { + atomic_inc(&iucv->msg_recv); + if (atomic_read(&iucv->msg_recv) > iucv->msglimit) { + WARN_ON(1); + iucv_sock_close(sk); + return -EFAULT; + } + } + + /* Queue backlog skbs */ + spin_lock_bh(&iucv->message_q.lock); + rskb = skb_dequeue(&iucv->backlog_skb_q); + while (rskb) { + IUCV_SKB_CB(rskb)->offset = 0; + if (sock_queue_rcv_skb(sk, rskb)) { + skb_queue_head(&iucv->backlog_skb_q, + rskb); + break; + } else { + rskb = skb_dequeue(&iucv->backlog_skb_q); + } + } + if (skb_queue_empty(&iucv->backlog_skb_q)) { + if (!list_empty(&iucv->message_q.list)) + iucv_process_message_q(sk); + if (atomic_read(&iucv->msg_recv) >= + iucv->msglimit / 2) { + err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); + if (err) { + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + } + } + } + spin_unlock_bh(&iucv->message_q.lock); + } + +done: + /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ + if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) + copied = rlen; + + return copied; +} + +static inline unsigned int iucv_accept_poll(struct sock *parent) +{ + struct iucv_sock *isk, *n; + struct sock *sk; + + list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { + sk = (struct sock *) isk; + + if (sk->sk_state == IUCV_CONNECTED) + return POLLIN | POLLRDNORM; + } + + return 0; +} + +unsigned int iucv_sock_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + struct sock *sk = sock->sk; + unsigned int mask = 0; + + sock_poll_wait(file, sk_sleep(sk), wait); + + if (sk->sk_state == IUCV_LISTEN) + return iucv_accept_poll(sk); + + if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) + mask |= POLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
POLLPRI : 0); + + if (sk->sk_shutdown & RCV_SHUTDOWN) + mask |= POLLRDHUP; + + if (sk->sk_shutdown == SHUTDOWN_MASK) + mask |= POLLHUP; + + if (!skb_queue_empty(&sk->sk_receive_queue) || + (sk->sk_shutdown & RCV_SHUTDOWN)) + mask |= POLLIN | POLLRDNORM; + + if (sk->sk_state == IUCV_CLOSED) + mask |= POLLHUP; + + if (sk->sk_state == IUCV_DISCONN) + mask |= POLLIN; + + if (sock_writeable(sk) && iucv_below_msglim(sk)) + mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + else + set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + + return mask; +} + +static int iucv_sock_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); + struct iucv_message txmsg; + int err = 0; + + how++; + + if ((how & ~SHUTDOWN_MASK) || !how) + return -EINVAL; + + lock_sock(sk); + switch (sk->sk_state) { + case IUCV_LISTEN: + case IUCV_DISCONN: + case IUCV_CLOSING: + case IUCV_CLOSED: + err = -ENOTCONN; + goto fail; + default: + break; + } + + if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { + if (iucv->transport == AF_IUCV_TRANS_IUCV) { + txmsg.class = 0; + txmsg.tag = 0; + err = pr_iucv->message_send(iucv->path, &txmsg, + IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8); + if (err) { + switch (err) { + case 1: + err = -ENOTCONN; + break; + case 2: + err = -ECONNRESET; + break; + default: + err = -ENOTCONN; + break; + } + } + } else + iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT); + } + + sk->sk_shutdown |= how; + if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) { + if ((iucv->transport == AF_IUCV_TRANS_IUCV) && + iucv->path) { + err = pr_iucv->path_quiesce(iucv->path, NULL); + if (err) + err = -ENOTCONN; +/* skb_queue_purge(&sk->sk_receive_queue); */ + } + skb_queue_purge(&sk->sk_receive_queue); + } + + /* Wake up anyone sleeping in poll */ + sk->sk_state_change(sk); + +fail: + release_sock(sk); + return err; +} + +static int iucv_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int err = 0; + + if (!sk) + return 0; + + iucv_sock_close(sk); + + sock_orphan(sk); + iucv_sock_kill(sk); + return err; +} + +/* getsockopt and setsockopt */ +static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, + char __user *optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); + int val; + int rc; + + if (level != SOL_IUCV) + return -ENOPROTOOPT; + + if (optlen < sizeof(int)) + return -EINVAL; + + if (get_user(val, (int __user *) optval)) + return -EFAULT; + + rc = 0; + + lock_sock(sk); + switch (optname) { + case SO_IPRMDATA_MSG: + if (val) + iucv->flags |= IUCV_IPRMDATA; + else + iucv->flags &= ~IUCV_IPRMDATA; + break; + case SO_MSGLIMIT: + switch (sk->sk_state) { + case IUCV_OPEN: + case IUCV_BOUND: + if (val < 1 || val > (u16)(~0)) + rc = -EINVAL; + else + iucv->msglimit = val; + break; + default: + rc = -EINVAL; + break; + } + break; + default: + rc = -ENOPROTOOPT; + break; + } + release_sock(sk); + + return rc; +} + +static int iucv_sock_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct iucv_sock *iucv = iucv_sk(sk); + unsigned int val; + int len; + + if (level != SOL_IUCV) + return -ENOPROTOOPT; + + if (get_user(len, optlen)) + return -EFAULT; + + if (len < 0) + return -EINVAL; + + len = min_t(unsigned int, len, sizeof(int)); + + switch (optname) { + case SO_IPRMDATA_MSG: + val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0; + break; + case SO_MSGLIMIT: + lock_sock(sk); + val = (iucv->path != NULL) ? 
iucv->path->msglim /* connected */ + : iucv->msglimit; /* default */ + release_sock(sk); + break; + case SO_MSGSIZE: + if (sk->sk_state == IUCV_OPEN) + return -EBADFD; + val = (iucv->hs_dev) ? iucv->hs_dev->mtu - + sizeof(struct af_iucv_trans_hdr) - ETH_HLEN : + 0x7fffffff; + break; + default: + return -ENOPROTOOPT; + } + + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return 0; +} + + +/* Callback wrappers - called from iucv base support */ +static int iucv_callback_connreq(struct iucv_path *path, + u8 ipvmid[8], u8 ipuser[16]) +{ + unsigned char user_data[16]; + unsigned char nuser_data[16]; + unsigned char src_name[8]; + struct sock *sk, *nsk; + struct iucv_sock *iucv, *niucv; + int err; + + memcpy(src_name, ipuser, 8); + EBCASC(src_name, 8); + /* Find out if this path belongs to af_iucv. */ + read_lock(&iucv_sk_list.lock); + iucv = NULL; + sk = NULL; + sk_for_each(sk, &iucv_sk_list.head) + if (sk->sk_state == IUCV_LISTEN && + !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { + /* + * Found a listening socket with + * src_name == ipuser[0-7]. + */ + iucv = iucv_sk(sk); + break; + } + read_unlock(&iucv_sk_list.lock); + if (!iucv) + /* No socket found, not one of our paths. */ + return -EINVAL; + + bh_lock_sock(sk); + + /* Check if parent socket is listening */ + low_nmcpy(user_data, iucv->src_name); + high_nmcpy(user_data, iucv->dst_name); + ASCEBC(user_data, sizeof(user_data)); + if (sk->sk_state != IUCV_LISTEN) { + err = pr_iucv->path_sever(path, user_data); + iucv_path_free(path); + goto fail; + } + + /* Check for backlog size */ + if (sk_acceptq_is_full(sk)) { + err = pr_iucv->path_sever(path, user_data); + iucv_path_free(path); + goto fail; + } + + /* Create the new socket */ + nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); + if (!nsk) { + err = pr_iucv->path_sever(path, user_data); + iucv_path_free(path); + goto fail; + } + + niucv = iucv_sk(nsk); + iucv_sock_init(nsk, sk); + + /* Set the new iucv_sock */ + memcpy(niucv->dst_name, ipuser + 8, 8); + EBCASC(niucv->dst_name, 8); + memcpy(niucv->dst_user_id, ipvmid, 8); + memcpy(niucv->src_name, iucv->src_name, 8); + memcpy(niucv->src_user_id, iucv->src_user_id, 8); + niucv->path = path; + + /* Call iucv_accept */ + high_nmcpy(nuser_data, ipuser + 8); + memcpy(nuser_data + 8, niucv->src_name, 8); + ASCEBC(nuser_data + 8, 8); + + /* set message limit for path based on msglimit of accepting socket */ + niucv->msglimit = iucv->msglimit; + path->msglim = iucv->msglimit; + err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk); + if (err) { + iucv_sever_path(nsk, 1); + iucv_sock_kill(nsk); + goto fail; + } + + iucv_accept_enqueue(sk, nsk); + + /* Wake up accept */ + nsk->sk_state = IUCV_CONNECTED; + sk->sk_data_ready(sk); + err = 0; +fail: + bh_unlock_sock(sk); + return 0; +} + +static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) +{ + struct sock *sk = path->private; + + sk->sk_state = IUCV_CONNECTED; + sk->sk_state_change(sk); +} + +static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) +{ + struct sock *sk = path->private; + struct iucv_sock *iucv = iucv_sk(sk); + struct sk_buff *skb; + struct sock_msg_q *save_msg; + int len; + + if (sk->sk_shutdown & RCV_SHUTDOWN) { + pr_iucv->message_reject(path, msg); + return; + } + + spin_lock(&iucv->message_q.lock); + + if (!list_empty(&iucv->message_q.list) || + !skb_queue_empty(&iucv->backlog_skb_q)) + goto save_message; + + len = atomic_read(&sk->sk_rmem_alloc); + len += 
SKB_TRUESIZE(iucv_msg_length(msg)); + if (len > sk->sk_rcvbuf) + goto save_message; + + skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA); + if (!skb) + goto save_message; + + iucv_process_message(sk, skb, path, msg); + goto out_unlock; + +save_message: + save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); + if (!save_msg) + goto out_unlock; + save_msg->path = path; + save_msg->msg = *msg; + + list_add_tail(&save_msg->list, &iucv->message_q.list); + +out_unlock: + spin_unlock(&iucv->message_q.lock); +} + +static void iucv_callback_txdone(struct iucv_path *path, + struct iucv_message *msg) +{ + struct sock *sk = path->private; + struct sk_buff *this = NULL; + struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; + struct sk_buff *list_skb = list->next; + unsigned long flags; + + bh_lock_sock(sk); + if (!skb_queue_empty(list)) { + spin_lock_irqsave(&list->lock, flags); + + while (list_skb != (struct sk_buff *)list) { + if (msg->tag == IUCV_SKB_CB(list_skb)->tag) { + this = list_skb; + break; + } + list_skb = list_skb->next; + } + if (this) + __skb_unlink(this, list); + + spin_unlock_irqrestore(&list->lock, flags); + + if (this) { + kfree_skb(this); + /* wake up any process waiting for sending */ + iucv_sock_wake_msglim(sk); + } + } + + if (sk->sk_state == IUCV_CLOSING) { + if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { + sk->sk_state = IUCV_CLOSED; + sk->sk_state_change(sk); + } + } + bh_unlock_sock(sk); + +} + +static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) +{ + struct sock *sk = path->private; + + if (sk->sk_state == IUCV_CLOSED) + return; + + bh_lock_sock(sk); + iucv_sever_path(sk, 1); + sk->sk_state = IUCV_DISCONN; + + sk->sk_state_change(sk); + bh_unlock_sock(sk); +} + +/* called if the other communication side shuts down its RECV direction; + * in turn, the callback sets SEND_SHUTDOWN to disable sending of data. 
+ */ +static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16]) +{ + struct sock *sk = path->private; + + bh_lock_sock(sk); + if (sk->sk_state != IUCV_CLOSED) { + sk->sk_shutdown |= SEND_SHUTDOWN; + sk->sk_state_change(sk); + } + bh_unlock_sock(sk); +} + +/***************** HiperSockets transport callbacks ********************/ +static void afiucv_swap_src_dest(struct sk_buff *skb) +{ + struct af_iucv_trans_hdr *trans_hdr = + (struct af_iucv_trans_hdr *)skb->data; + char tmpID[8]; + char tmpName[8]; + + ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); + ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); + ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); + ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); + memcpy(tmpID, trans_hdr->srcUserID, 8); + memcpy(tmpName, trans_hdr->srcAppName, 8); + memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8); + memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8); + memcpy(trans_hdr->destUserID, tmpID, 8); + memcpy(trans_hdr->destAppName, tmpName, 8); + skb_push(skb, ETH_HLEN); + memset(skb->data, 0, ETH_HLEN); +} + +/** + * afiucv_hs_callback_syn - react on received SYN + **/ +static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) +{ + struct sock *nsk; + struct iucv_sock *iucv, *niucv; + struct af_iucv_trans_hdr *trans_hdr; + int err; + + iucv = iucv_sk(sk); + trans_hdr = (struct af_iucv_trans_hdr *)skb->data; + if (!iucv) { + /* no sock - connection refused */ + afiucv_swap_src_dest(skb); + trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; + err = dev_queue_xmit(skb); + goto out; + } + + nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); + bh_lock_sock(sk); + if ((sk->sk_state != IUCV_LISTEN) || + sk_acceptq_is_full(sk) || + !nsk) { + /* error on server socket - connection refused */ + afiucv_swap_src_dest(skb); + trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; + err = dev_queue_xmit(skb); + iucv_sock_kill(nsk); + bh_unlock_sock(sk); + goto out; + } + + niucv = iucv_sk(nsk); + iucv_sock_init(nsk, sk); + niucv->transport = AF_IUCV_TRANS_HIPER; + niucv->msglimit = iucv->msglimit; + if (!trans_hdr->window) + niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT; + else + niucv->msglimit_peer = trans_hdr->window; + memcpy(niucv->dst_name, trans_hdr->srcAppName, 8); + memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8); + memcpy(niucv->src_name, iucv->src_name, 8); + memcpy(niucv->src_user_id, iucv->src_user_id, 8); + nsk->sk_bound_dev_if = sk->sk_bound_dev_if; + niucv->hs_dev = iucv->hs_dev; + dev_hold(niucv->hs_dev); + afiucv_swap_src_dest(skb); + trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK; + trans_hdr->window = niucv->msglimit; + /* if receiver acks the xmit connection is established */ + err = dev_queue_xmit(skb); + if (!err) { + iucv_accept_enqueue(sk, nsk); + nsk->sk_state = IUCV_CONNECTED; + sk->sk_data_ready(sk); + } else + iucv_sock_kill(nsk); + bh_unlock_sock(sk); + +out: + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_callback_synack() - react on received SYN-ACK + **/ +static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) +{ + struct iucv_sock *iucv = iucv_sk(sk); + struct af_iucv_trans_hdr *trans_hdr = + (struct af_iucv_trans_hdr *)skb->data; + + if (!iucv) + goto out; + if (sk->sk_state != IUCV_BOUND) + goto out; + bh_lock_sock(sk); + iucv->msglimit_peer = trans_hdr->window; + sk->sk_state = IUCV_CONNECTED; + sk->sk_state_change(sk); + bh_unlock_sock(sk); +out: + kfree_skb(skb); + return NET_RX_SUCCESS; +} + 
+/** + * afiucv_hs_callback_synfin() - react on received SYN_FIN + **/ +static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) +{ + struct iucv_sock *iucv = iucv_sk(sk); + + if (!iucv) + goto out; + if (sk->sk_state != IUCV_BOUND) + goto out; + bh_lock_sock(sk); + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + bh_unlock_sock(sk); +out: + kfree_skb(skb); + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_callback_fin() - react on received FIN + **/ +static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) +{ + struct iucv_sock *iucv = iucv_sk(sk); + + /* other end of connection closed */ + if (!iucv) + goto out; + bh_lock_sock(sk); + if (sk->sk_state == IUCV_CONNECTED) { + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + } + bh_unlock_sock(sk); +out: + kfree_skb(skb); + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_callback_win() - react on received WIN + **/ +static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) +{ + struct iucv_sock *iucv = iucv_sk(sk); + struct af_iucv_trans_hdr *trans_hdr = + (struct af_iucv_trans_hdr *)skb->data; + + if (!iucv) + return NET_RX_SUCCESS; + + if (sk->sk_state != IUCV_CONNECTED) + return NET_RX_SUCCESS; + + atomic_sub(trans_hdr->window, &iucv->msg_sent); + iucv_sock_wake_msglim(sk); + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_callback_rx() - react on received data + **/ +static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) +{ + struct iucv_sock *iucv = iucv_sk(sk); + + if (!iucv) { + kfree_skb(skb); + return NET_RX_SUCCESS; + } + + if (sk->sk_state != IUCV_CONNECTED) { + kfree_skb(skb); + return NET_RX_SUCCESS; + } + + if (sk->sk_shutdown & RCV_SHUTDOWN) { + kfree_skb(skb); + return NET_RX_SUCCESS; + } + + /* write stuff from iucv_msg to skb cb */ + if (skb->len < sizeof(struct af_iucv_trans_hdr)) { + kfree_skb(skb); + return NET_RX_SUCCESS; + } + skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + IUCV_SKB_CB(skb)->offset = 0; + spin_lock(&iucv->message_q.lock); + if (skb_queue_empty(&iucv->backlog_skb_q)) { + if (sock_queue_rcv_skb(sk, skb)) { + /* handle rcv queue full */ + skb_queue_tail(&iucv->backlog_skb_q, skb); + } + } else + skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); + spin_unlock(&iucv->message_q.lock); + return NET_RX_SUCCESS; +} + +/** + * afiucv_hs_rcv() - base function for arriving data through HiperSockets + * transport + * called from netif RX softirq + **/ +static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + struct sock *sk; + struct iucv_sock *iucv; + struct af_iucv_trans_hdr *trans_hdr; + char nullstring[8]; + int err = 0; + + skb_pull(skb, ETH_HLEN); + trans_hdr = (struct af_iucv_trans_hdr *)skb->data; + EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); + EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); + EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); + EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); + memset(nullstring, 0, sizeof(nullstring)); + iucv = NULL; + sk = NULL; + read_lock(&iucv_sk_list.lock); + sk_for_each(sk, &iucv_sk_list.head) { + if (trans_hdr->flags == AF_IUCV_FLAG_SYN) { + if ((!memcmp(&iucv_sk(sk)->src_name, + trans_hdr->destAppName, 8)) && + (!memcmp(&iucv_sk(sk)->src_user_id, + trans_hdr->destUserID, 8)) && + (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && + (!memcmp(&iucv_sk(sk)->dst_user_id, + nullstring, 8))) { + 
iucv = iucv_sk(sk); + break; + } + } else { + if ((!memcmp(&iucv_sk(sk)->src_name, + trans_hdr->destAppName, 8)) && + (!memcmp(&iucv_sk(sk)->src_user_id, + trans_hdr->destUserID, 8)) && + (!memcmp(&iucv_sk(sk)->dst_name, + trans_hdr->srcAppName, 8)) && + (!memcmp(&iucv_sk(sk)->dst_user_id, + trans_hdr->srcUserID, 8))) { + iucv = iucv_sk(sk); + break; + } + } + } + read_unlock(&iucv_sk_list.lock); + if (!iucv) + sk = NULL; + + /* no sock + how should we send with no sock + 1) send without sock no send rc checking? + 2) introduce default sock to handle this cases + + SYN -> send SYN|ACK in good case, send SYN|FIN in bad case + data -> send FIN + SYN|ACK, SYN|FIN, FIN -> no action? */ + + switch (trans_hdr->flags) { + case AF_IUCV_FLAG_SYN: + /* connect request */ + err = afiucv_hs_callback_syn(sk, skb); + break; + case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK): + /* connect request confirmed */ + err = afiucv_hs_callback_synack(sk, skb); + break; + case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN): + /* connect request refused */ + err = afiucv_hs_callback_synfin(sk, skb); + break; + case (AF_IUCV_FLAG_FIN): + /* close request */ + err = afiucv_hs_callback_fin(sk, skb); + break; + case (AF_IUCV_FLAG_WIN): + err = afiucv_hs_callback_win(sk, skb); + if (skb->len == sizeof(struct af_iucv_trans_hdr)) { + kfree_skb(skb); + break; + } + /* fall through and receive non-zero length data */ + case (AF_IUCV_FLAG_SHT): + /* shutdown request */ + /* fall through and receive zero length data */ + case 0: + /* plain data frame */ + IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; + err = afiucv_hs_callback_rx(sk, skb); + break; + default: + ; + } + + return err; +} + +/** + * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets + * transport + **/ +static void afiucv_hs_callback_txnotify(struct sk_buff *skb, + enum iucv_tx_notify n) +{ + struct sock *isk = skb->sk; + struct sock *sk = NULL; + struct iucv_sock *iucv = NULL; + struct sk_buff_head *list; + struct sk_buff *list_skb; + struct sk_buff *nskb; + unsigned long flags; + + read_lock_irqsave(&iucv_sk_list.lock, flags); + sk_for_each(sk, &iucv_sk_list.head) + if (sk == isk) { + iucv = iucv_sk(sk); + break; + } + read_unlock_irqrestore(&iucv_sk_list.lock, flags); + + if (!iucv || sock_flag(sk, SOCK_ZAPPED)) + return; + + list = &iucv->send_skb_q; + spin_lock_irqsave(&list->lock, flags); + if (skb_queue_empty(list)) + goto out_unlock; + list_skb = list->next; + nskb = list_skb->next; + while (list_skb != (struct sk_buff *)list) { + if (skb_shinfo(list_skb) == skb_shinfo(skb)) { + switch (n) { + case TX_NOTIFY_OK: + __skb_unlink(list_skb, list); + kfree_skb(list_skb); + iucv_sock_wake_msglim(sk); + break; + case TX_NOTIFY_PENDING: + atomic_inc(&iucv->pendings); + break; + case TX_NOTIFY_DELAYED_OK: + __skb_unlink(list_skb, list); + atomic_dec(&iucv->pendings); + if (atomic_read(&iucv->pendings) <= 0) + iucv_sock_wake_msglim(sk); + kfree_skb(list_skb); + break; + case TX_NOTIFY_UNREACHABLE: + case TX_NOTIFY_DELAYED_UNREACHABLE: + case TX_NOTIFY_TPQFULL: /* not yet used */ + case TX_NOTIFY_GENERALERROR: + case TX_NOTIFY_DELAYED_GENERALERROR: + __skb_unlink(list_skb, list); + kfree_skb(list_skb); + if (sk->sk_state == IUCV_CONNECTED) { + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + } + break; + } + break; + } + list_skb = nskb; + nskb = nskb->next; + } +out_unlock: + spin_unlock_irqrestore(&list->lock, flags); + + if (sk->sk_state == IUCV_CLOSING) { + if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { + sk->sk_state = IUCV_CLOSED; 
+ sk->sk_state_change(sk); + } + } + +} + +/* + * afiucv_netdev_event: handle netdev notifier chain events + */ +static int afiucv_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + struct sock *sk; + struct iucv_sock *iucv; + + switch (event) { + case NETDEV_REBOOT: + case NETDEV_GOING_DOWN: + sk_for_each(sk, &iucv_sk_list.head) { + iucv = iucv_sk(sk); + if ((iucv->hs_dev == event_dev) && + (sk->sk_state == IUCV_CONNECTED)) { + if (event == NETDEV_GOING_DOWN) + iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); + sk->sk_state = IUCV_DISCONN; + sk->sk_state_change(sk); + } + } + break; + case NETDEV_DOWN: + case NETDEV_UNREGISTER: + default: + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block afiucv_netdev_notifier = { + .notifier_call = afiucv_netdev_event, +}; + +static const struct proto_ops iucv_sock_ops = { + .family = PF_IUCV, + .owner = THIS_MODULE, + .release = iucv_sock_release, + .bind = iucv_sock_bind, + .connect = iucv_sock_connect, + .listen = iucv_sock_listen, + .accept = iucv_sock_accept, + .getname = iucv_sock_getname, + .sendmsg = iucv_sock_sendmsg, + .recvmsg = iucv_sock_recvmsg, + .poll = iucv_sock_poll, + .ioctl = sock_no_ioctl, + .mmap = sock_no_mmap, + .socketpair = sock_no_socketpair, + .shutdown = iucv_sock_shutdown, + .setsockopt = iucv_sock_setsockopt, + .getsockopt = iucv_sock_getsockopt, +}; + +static const struct net_proto_family iucv_sock_family_ops = { + .family = AF_IUCV, + .owner = THIS_MODULE, + .create = iucv_sock_create, +}; + +static struct packet_type iucv_packet_type = { + .type = cpu_to_be16(ETH_P_AF_IUCV), + .func = afiucv_hs_rcv, +}; + +static int afiucv_iucv_init(void) +{ + int err; + + err = pr_iucv->iucv_register(&af_iucv_handler, 0); + if (err) + goto out; + /* establish dummy device */ + af_iucv_driver.bus = pr_iucv->bus; + err = driver_register(&af_iucv_driver); + if (err) + goto out_iucv; + af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!af_iucv_dev) { + err = -ENOMEM; + goto out_driver; + } + dev_set_name(af_iucv_dev, "af_iucv"); + af_iucv_dev->bus = pr_iucv->bus; + af_iucv_dev->parent = pr_iucv->root; + af_iucv_dev->release = (void (*)(struct device *))kfree; + af_iucv_dev->driver = &af_iucv_driver; + err = device_register(af_iucv_dev); + if (err) + goto out_driver; + return 0; + +out_driver: + driver_unregister(&af_iucv_driver); +out_iucv: + pr_iucv->iucv_unregister(&af_iucv_handler, 0); +out: + return err; +} + +static int __init afiucv_init(void) +{ + int err; + + if (MACHINE_IS_VM) { + cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); + if (unlikely(err)) { + WARN_ON(err); + err = -EPROTONOSUPPORT; + goto out; + } + + pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv"); + if (!pr_iucv) { + printk(KERN_WARNING "iucv_if lookup failed\n"); + memset(&iucv_userid, 0, sizeof(iucv_userid)); + } + } else { + memset(&iucv_userid, 0, sizeof(iucv_userid)); + pr_iucv = NULL; + } + + err = proto_register(&iucv_proto, 0); + if (err) + goto out; + err = sock_register(&iucv_sock_family_ops); + if (err) + goto out_proto; + + if (pr_iucv) { + err = afiucv_iucv_init(); + if (err) + goto out_sock; + } else + register_netdevice_notifier(&afiucv_netdev_notifier); + dev_add_pack(&iucv_packet_type); + return 0; + +out_sock: + sock_unregister(PF_IUCV); +out_proto: + proto_unregister(&iucv_proto); +out: + if (pr_iucv) + symbol_put(iucv_if); + return err; +} + +static void __exit afiucv_exit(void) +{ + if (pr_iucv) { + 
device_unregister(af_iucv_dev); + driver_unregister(&af_iucv_driver); + pr_iucv->iucv_unregister(&af_iucv_handler, 0); + symbol_put(iucv_if); + } else + unregister_netdevice_notifier(&afiucv_netdev_notifier); + dev_remove_pack(&iucv_packet_type); + sock_unregister(PF_IUCV); + proto_unregister(&iucv_proto); +} + +module_init(afiucv_init); +module_exit(afiucv_exit); + +MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>"); +MODULE_DESCRIPTION("IUCV Sockets ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETPROTO(PF_IUCV); + diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c new file mode 100644 index 000000000..2a6a1fdd6 --- /dev/null +++ b/net/iucv/iucv.c @@ -0,0 +1,2119 @@ +/* + * IUCV base infrastructure. + * + * Copyright IBM Corp. 2001, 2009 + * + * Author(s): + * Original source: + * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 + * Xenia Tkatschow (xenia@us.ibm.com) + * 2Gb awareness and general cleanup: + * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) + * Rewritten for af_iucv: + * Martin Schwidefsky <schwidefsky@de.ibm.com> + * PM functions: + * Ursula Braun (ursula.braun@de.ibm.com) + * + * Documentation used: + * The original source + * CP Programming Service, IBM document # SC24-5760 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define KMSG_COMPONENT "iucv" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/kernel_stat.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/spinlock.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/device.h> +#include <linux/cpu.h> +#include <linux/reboot.h> +#include <net/iucv/iucv.h> +#include <linux/atomic.h> +#include <asm/ebcdic.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/smp.h> + +/* + * FLAGS: + * All flags are defined in the field IPFLAGS1 of each function + * and can be found in CP Programming Services. + * IPSRCCLS - Indicates you have specified a source class. + * IPTRGCLS - Indicates you have specified a target class. + * IPFGPID - Indicates you have specified a pathid. + * IPFGMID - Indicates you have specified a message ID. + * IPNORPY - Indicates a one-way message. No reply expected. + * IPALL - Indicates that all paths are affected. 
+ */ +#define IUCV_IPSRCCLS 0x01 +#define IUCV_IPTRGCLS 0x01 +#define IUCV_IPFGPID 0x02 +#define IUCV_IPFGMID 0x04 +#define IUCV_IPNORPY 0x10 +#define IUCV_IPALL 0x80 + +static int iucv_bus_match(struct device *dev, struct device_driver *drv) +{ + return 0; +} + +enum iucv_pm_states { + IUCV_PM_INITIAL = 0, + IUCV_PM_FREEZING = 1, + IUCV_PM_THAWING = 2, + IUCV_PM_RESTORING = 3, +}; +static enum iucv_pm_states iucv_pm_state; + +static int iucv_pm_prepare(struct device *); +static void iucv_pm_complete(struct device *); +static int iucv_pm_freeze(struct device *); +static int iucv_pm_thaw(struct device *); +static int iucv_pm_restore(struct device *); + +static const struct dev_pm_ops iucv_pm_ops = { + .prepare = iucv_pm_prepare, + .complete = iucv_pm_complete, + .freeze = iucv_pm_freeze, + .thaw = iucv_pm_thaw, + .restore = iucv_pm_restore, +}; + +struct bus_type iucv_bus = { + .name = "iucv", + .match = iucv_bus_match, + .pm = &iucv_pm_ops, +}; +EXPORT_SYMBOL(iucv_bus); + +struct device *iucv_root; +EXPORT_SYMBOL(iucv_root); + +static int iucv_available; + +/* General IUCV interrupt structure */ +struct iucv_irq_data { + u16 ippathid; + u8 ipflags1; + u8 iptype; + u32 res2[8]; +}; + +struct iucv_irq_list { + struct list_head list; + struct iucv_irq_data data; +}; + +static struct iucv_irq_data *iucv_irq_data[NR_CPUS]; +static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE }; +static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE }; + +/* + * Queue of interrupt buffers lock for delivery via the tasklet + * (fast but can't call smp_call_function). + */ +static LIST_HEAD(iucv_task_queue); + +/* + * The tasklet for fast delivery of iucv interrupts. + */ +static void iucv_tasklet_fn(unsigned long); +static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0); + +/* + * Queue of interrupt buffers for delivery via a work queue + * (slower but can call smp_call_function). + */ +static LIST_HEAD(iucv_work_queue); + +/* + * The work element to deliver path pending interrupts. + */ +static void iucv_work_fn(struct work_struct *work); +static DECLARE_WORK(iucv_work, iucv_work_fn); + +/* + * Spinlock protecting task and work queue. + */ +static DEFINE_SPINLOCK(iucv_queue_lock); + +enum iucv_command_codes { + IUCV_QUERY = 0, + IUCV_RETRIEVE_BUFFER = 2, + IUCV_SEND = 4, + IUCV_RECEIVE = 5, + IUCV_REPLY = 6, + IUCV_REJECT = 8, + IUCV_PURGE = 9, + IUCV_ACCEPT = 10, + IUCV_CONNECT = 11, + IUCV_DECLARE_BUFFER = 12, + IUCV_QUIESCE = 13, + IUCV_RESUME = 14, + IUCV_SEVER = 15, + IUCV_SETMASK = 16, + IUCV_SETCONTROLMASK = 17, +}; + +/* + * Error messages that are used with the iucv_sever function. They get + * converted to EBCDIC. + */ +static char iucv_error_no_listener[16] = "NO LISTENER"; +static char iucv_error_no_memory[16] = "NO MEMORY"; +static char iucv_error_pathid[16] = "INVALID PATHID"; + +/* + * iucv_handler_list: List of registered handlers. + */ +static LIST_HEAD(iucv_handler_list); + +/* + * iucv_path_table: an array of iucv_path structures. + */ +static struct iucv_path **iucv_path_table; +static unsigned long iucv_max_pathid; + +/* + * iucv_lock: spinlock protecting iucv_handler_list and iucv_pathid_table + */ +static DEFINE_SPINLOCK(iucv_table_lock); + +/* + * iucv_active_cpu: contains the number of the cpu executing the tasklet + * or the work handler. Needed for iucv_path_sever called from tasklet. + */ +static int iucv_active_cpu = -1; + +/* + * Mutex and wait queue for iucv_register/iucv_unregister. 
+ */ +static DEFINE_MUTEX(iucv_register_mutex); + +/* + * Counter for number of non-smp capable handlers. + */ +static int iucv_nonsmp_handler; + +/* + * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect, + * iucv_path_quiesce and iucv_path_sever. + */ +struct iucv_cmd_control { + u16 ippathid; + u8 ipflags1; + u8 iprcode; + u16 ipmsglim; + u16 res1; + u8 ipvmid[8]; + u8 ipuser[16]; + u8 iptarget[8]; +} __attribute__ ((packed,aligned(8))); + +/* + * Data in parameter list iucv structure. Used by iucv_message_send, + * iucv_message_send2way and iucv_message_reply. + */ +struct iucv_cmd_dpl { + u16 ippathid; + u8 ipflags1; + u8 iprcode; + u32 ipmsgid; + u32 iptrgcls; + u8 iprmmsg[8]; + u32 ipsrccls; + u32 ipmsgtag; + u32 ipbfadr2; + u32 ipbfln2f; + u32 res; +} __attribute__ ((packed,aligned(8))); + +/* + * Data in buffer iucv structure. Used by iucv_message_receive, + * iucv_message_reject, iucv_message_send, iucv_message_send2way + * and iucv_declare_cpu. + */ +struct iucv_cmd_db { + u16 ippathid; + u8 ipflags1; + u8 iprcode; + u32 ipmsgid; + u32 iptrgcls; + u32 ipbfadr1; + u32 ipbfln1f; + u32 ipsrccls; + u32 ipmsgtag; + u32 ipbfadr2; + u32 ipbfln2f; + u32 res; +} __attribute__ ((packed,aligned(8))); + +/* + * Purge message iucv structure. Used by iucv_message_purge. + */ +struct iucv_cmd_purge { + u16 ippathid; + u8 ipflags1; + u8 iprcode; + u32 ipmsgid; + u8 ipaudit[3]; + u8 res1[5]; + u32 res2; + u32 ipsrccls; + u32 ipmsgtag; + u32 res3[3]; +} __attribute__ ((packed,aligned(8))); + +/* + * Set mask iucv structure. Used by iucv_enable_cpu. + */ +struct iucv_cmd_set_mask { + u8 ipmask; + u8 res1[2]; + u8 iprcode; + u32 res2[9]; +} __attribute__ ((packed,aligned(8))); + +union iucv_param { + struct iucv_cmd_control ctrl; + struct iucv_cmd_dpl dpl; + struct iucv_cmd_db db; + struct iucv_cmd_purge purge; + struct iucv_cmd_set_mask set_mask; +}; + +/* + * Anchor for per-cpu IUCV command parameter block. + */ +static union iucv_param *iucv_param[NR_CPUS]; +static union iucv_param *iucv_param_irq[NR_CPUS]; + +/** + * iucv_call_b2f0 + * @code: identifier of IUCV call to CP. + * @parm: pointer to a struct iucv_parm block + * + * Calls CP to execute IUCV commands. + * + * Returns the result of the CP IUCV call. + */ +static inline int iucv_call_b2f0(int command, union iucv_param *parm) +{ + register unsigned long reg0 asm ("0"); + register unsigned long reg1 asm ("1"); + int ccode; + + reg0 = command; + reg1 = virt_to_phys(parm); + asm volatile( + " .long 0xb2f01000\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1) + : "m" (*parm) : "cc"); + return (ccode == 1) ? parm->ctrl.iprcode : ccode; +} + +/** + * iucv_query_maxconn + * + * Determines the maximum number of connections that may be established. + * + * Returns the maximum number of connections or -EPERM is IUCV is not + * available. + */ +static int iucv_query_maxconn(void) +{ + register unsigned long reg0 asm ("0"); + register unsigned long reg1 asm ("1"); + void *param; + int ccode; + + param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA); + if (!param) + return -ENOMEM; + reg0 = IUCV_QUERY; + reg1 = (unsigned long) param; + asm volatile ( + " .long 0xb2f01000\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); + if (ccode == 0) + iucv_max_pathid = reg1; + kfree(param); + return ccode ? -EPERM : 0; +} + +/** + * iucv_allow_cpu + * @data: unused + * + * Allow iucv interrupts on this cpu. 
+ */ +static void iucv_allow_cpu(void *data) +{ + int cpu = smp_processor_id(); + union iucv_param *parm; + + /* + * Enable all iucv interrupts. + * ipmask contains bits for the different interrupts + * 0x80 - Flag to allow nonpriority message pending interrupts + * 0x40 - Flag to allow priority message pending interrupts + * 0x20 - Flag to allow nonpriority message completion interrupts + * 0x10 - Flag to allow priority message completion interrupts + * 0x08 - Flag to allow IUCV control interrupts + */ + parm = iucv_param_irq[cpu]; + memset(parm, 0, sizeof(union iucv_param)); + parm->set_mask.ipmask = 0xf8; + iucv_call_b2f0(IUCV_SETMASK, parm); + + /* + * Enable all iucv control interrupts. + * ipmask contains bits for the different interrupts + * 0x80 - Flag to allow pending connections interrupts + * 0x40 - Flag to allow connection complete interrupts + * 0x20 - Flag to allow connection severed interrupts + * 0x10 - Flag to allow connection quiesced interrupts + * 0x08 - Flag to allow connection resumed interrupts + */ + memset(parm, 0, sizeof(union iucv_param)); + parm->set_mask.ipmask = 0xf8; + iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); + /* Set indication that iucv interrupts are allowed for this cpu. */ + cpumask_set_cpu(cpu, &iucv_irq_cpumask); +} + +/** + * iucv_block_cpu + * @data: unused + * + * Block iucv interrupts on this cpu. + */ +static void iucv_block_cpu(void *data) +{ + int cpu = smp_processor_id(); + union iucv_param *parm; + + /* Disable all iucv interrupts. */ + parm = iucv_param_irq[cpu]; + memset(parm, 0, sizeof(union iucv_param)); + iucv_call_b2f0(IUCV_SETMASK, parm); + + /* Clear indication that iucv interrupts are allowed for this cpu. */ + cpumask_clear_cpu(cpu, &iucv_irq_cpumask); +} + +/** + * iucv_block_cpu_almost + * @data: unused + * + * Allow connection-severed interrupts only on this cpu. + */ +static void iucv_block_cpu_almost(void *data) +{ + int cpu = smp_processor_id(); + union iucv_param *parm; + + /* Allow iucv control interrupts only */ + parm = iucv_param_irq[cpu]; + memset(parm, 0, sizeof(union iucv_param)); + parm->set_mask.ipmask = 0x08; + iucv_call_b2f0(IUCV_SETMASK, parm); + /* Allow iucv-severed interrupt only */ + memset(parm, 0, sizeof(union iucv_param)); + parm->set_mask.ipmask = 0x20; + iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); + + /* Clear indication that iucv interrupts are allowed for this cpu. */ + cpumask_clear_cpu(cpu, &iucv_irq_cpumask); +} + +/** + * iucv_declare_cpu + * @data: unused + * + * Declare a interrupt buffer on this cpu. + */ +static void iucv_declare_cpu(void *data) +{ + int cpu = smp_processor_id(); + union iucv_param *parm; + int rc; + + if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) + return; + + /* Declare interrupt buffer. */ + parm = iucv_param_irq[cpu]; + memset(parm, 0, sizeof(union iucv_param)); + parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); + rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); + if (rc) { + char *err = "Unknown"; + switch (rc) { + case 0x03: + err = "Directory error"; + break; + case 0x0a: + err = "Invalid length"; + break; + case 0x13: + err = "Buffer already exists"; + break; + case 0x3e: + err = "Buffer overlap"; + break; + case 0x5c: + err = "Paging or storage error"; + break; + } + pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n", + cpu, rc, err); + return; + } + + /* Set indication that an iucv buffer exists for this cpu. 
*/ + cpumask_set_cpu(cpu, &iucv_buffer_cpumask); + + if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask)) + /* Enable iucv interrupts on this cpu. */ + iucv_allow_cpu(NULL); + else + /* Disable iucv interrupts on this cpu. */ + iucv_block_cpu(NULL); +} + +/** + * iucv_retrieve_cpu + * @data: unused + * + * Retrieve interrupt buffer on this cpu. + */ +static void iucv_retrieve_cpu(void *data) +{ + int cpu = smp_processor_id(); + union iucv_param *parm; + + if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) + return; + + /* Block iucv interrupts. */ + iucv_block_cpu(NULL); + + /* Retrieve interrupt buffer. */ + parm = iucv_param_irq[cpu]; + iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); + + /* Clear indication that an iucv buffer exists for this cpu. */ + cpumask_clear_cpu(cpu, &iucv_buffer_cpumask); +} + +/** + * iucv_setmask_smp + * + * Allow iucv interrupts on all cpus. + */ +static void iucv_setmask_mp(void) +{ + int cpu; + + get_online_cpus(); + for_each_online_cpu(cpu) + /* Enable all cpus with a declared buffer. */ + if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) && + !cpumask_test_cpu(cpu, &iucv_irq_cpumask)) + smp_call_function_single(cpu, iucv_allow_cpu, + NULL, 1); + put_online_cpus(); +} + +/** + * iucv_setmask_up + * + * Allow iucv interrupts on a single cpu. + */ +static void iucv_setmask_up(void) +{ + cpumask_t cpumask; + int cpu; + + /* Disable all cpu but the first in cpu_irq_cpumask. */ + cpumask_copy(&cpumask, &iucv_irq_cpumask); + cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask); + for_each_cpu(cpu, &cpumask) + smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); +} + +/** + * iucv_enable + * + * This function makes iucv ready for use. It allocates the pathid + * table, declares an iucv interrupt buffer and enables the iucv + * interrupts. Called when the first user has registered an iucv + * handler. + */ +static int iucv_enable(void) +{ + size_t alloc_size; + int cpu, rc; + + get_online_cpus(); + rc = -ENOMEM; + alloc_size = iucv_max_pathid * sizeof(struct iucv_path); + iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); + if (!iucv_path_table) + goto out; + /* Declare per cpu buffers. */ + rc = -EIO; + for_each_online_cpu(cpu) + smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); + if (cpumask_empty(&iucv_buffer_cpumask)) + /* No cpu could declare an iucv buffer. */ + goto out; + put_online_cpus(); + return 0; +out: + kfree(iucv_path_table); + iucv_path_table = NULL; + put_online_cpus(); + return rc; +} + +/** + * iucv_disable + * + * This function shuts down iucv. It disables iucv interrupts, retrieves + * the iucv interrupt buffer and frees the pathid table. Called after the + * last user unregister its iucv handler. + */ +static void iucv_disable(void) +{ + get_online_cpus(); + on_each_cpu(iucv_retrieve_cpu, NULL, 1); + kfree(iucv_path_table); + iucv_path_table = NULL; + put_online_cpus(); +} + +static void free_iucv_data(int cpu) +{ + kfree(iucv_param_irq[cpu]); + iucv_param_irq[cpu] = NULL; + kfree(iucv_param[cpu]); + iucv_param[cpu] = NULL; + kfree(iucv_irq_data[cpu]); + iucv_irq_data[cpu] = NULL; +} + +static int alloc_iucv_data(int cpu) +{ + /* Note: GFP_DMA used to get memory below 2G */ + iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), + GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); + if (!iucv_irq_data[cpu]) + goto out_free; + + /* Allocate parameter blocks. 
*/ + iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), + GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); + if (!iucv_param[cpu]) + goto out_free; + + iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), + GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); + if (!iucv_param_irq[cpu]) + goto out_free; + + return 0; + +out_free: + free_iucv_data(cpu); + return -ENOMEM; +} + +static int iucv_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + cpumask_t cpumask; + long cpu = (long) hcpu; + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + if (alloc_iucv_data(cpu)) + return notifier_from_errno(-ENOMEM); + break; + case CPU_UP_CANCELED: + case CPU_UP_CANCELED_FROZEN: + case CPU_DEAD: + case CPU_DEAD_FROZEN: + free_iucv_data(cpu); + break; + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: + if (!iucv_path_table) + break; + smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + if (!iucv_path_table) + break; + cpumask_copy(&cpumask, &iucv_buffer_cpumask); + cpumask_clear_cpu(cpu, &cpumask); + if (cpumask_empty(&cpumask)) + /* Can't offline last IUCV enabled cpu. */ + return notifier_from_errno(-EINVAL); + smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); + if (cpumask_empty(&iucv_irq_cpumask)) + smp_call_function_single( + cpumask_first(&iucv_buffer_cpumask), + iucv_allow_cpu, NULL, 1); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __refdata iucv_cpu_notifier = { + .notifier_call = iucv_cpu_notify, +}; + +/** + * iucv_sever_pathid + * @pathid: path identification number. + * @userdata: 16-bytes of user data. + * + * Sever an iucv path to free up the pathid. Used internally. + */ +static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) +{ + union iucv_param *parm; + + parm = iucv_param_irq[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + if (userdata) + memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); + parm->ctrl.ippathid = pathid; + return iucv_call_b2f0(IUCV_SEVER, parm); +} + +/** + * __iucv_cleanup_queue + * @dummy: unused dummy argument + * + * Nop function called via smp_call_function to force work items from + * pending external iucv interrupts to the work queue. + */ +static void __iucv_cleanup_queue(void *dummy) +{ +} + +/** + * iucv_cleanup_queue + * + * Function called after a path has been severed to find all remaining + * work items for the now stale pathid. The caller needs to hold the + * iucv_table_lock. + */ +static void iucv_cleanup_queue(void) +{ + struct iucv_irq_list *p, *n; + + /* + * When a path is severed, the pathid can be reused immediately + * on a iucv connect or a connection pending interrupt. Remove + * all entries from the task queue that refer to a stale pathid + * (iucv_path_table[ix] == NULL). Only then do the iucv connect + * or deliver the connection pending interrupt. To get all the + * pending interrupts force them to the work queue by calling + * an empty function on all cpus. + */ + smp_call_function(__iucv_cleanup_queue, NULL, 1); + spin_lock_irq(&iucv_queue_lock); + list_for_each_entry_safe(p, n, &iucv_task_queue, list) { + /* Remove stale work items from the task queue. 
*/ + if (iucv_path_table[p->data.ippathid] == NULL) { + list_del(&p->list); + kfree(p); + } + } + spin_unlock_irq(&iucv_queue_lock); +} + +/** + * iucv_register: + * @handler: address of iucv handler structure + * @smp: != 0 indicates that the handler can deal with out of order messages + * + * Registers a driver with IUCV. + * + * Returns 0 on success, -ENOMEM if the memory allocation for the pathid + * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. + */ +int iucv_register(struct iucv_handler *handler, int smp) +{ + int rc; + + if (!iucv_available) + return -ENOSYS; + mutex_lock(&iucv_register_mutex); + if (!smp) + iucv_nonsmp_handler++; + if (list_empty(&iucv_handler_list)) { + rc = iucv_enable(); + if (rc) + goto out_mutex; + } else if (!smp && iucv_nonsmp_handler == 1) + iucv_setmask_up(); + INIT_LIST_HEAD(&handler->paths); + + spin_lock_bh(&iucv_table_lock); + list_add_tail(&handler->list, &iucv_handler_list); + spin_unlock_bh(&iucv_table_lock); + rc = 0; +out_mutex: + mutex_unlock(&iucv_register_mutex); + return rc; +} +EXPORT_SYMBOL(iucv_register); + +/** + * iucv_unregister + * @handler: address of iucv handler structure + * @smp: != 0 indicates that the handler can deal with out of order messages + * + * Unregister driver from IUCV. + */ +void iucv_unregister(struct iucv_handler *handler, int smp) +{ + struct iucv_path *p, *n; + + mutex_lock(&iucv_register_mutex); + spin_lock_bh(&iucv_table_lock); + /* Remove handler from the iucv_handler_list. */ + list_del_init(&handler->list); + /* Sever all pathids still referring to the handler. */ + list_for_each_entry_safe(p, n, &handler->paths, list) { + iucv_sever_pathid(p->pathid, NULL); + iucv_path_table[p->pathid] = NULL; + list_del(&p->list); + iucv_path_free(p); + } + spin_unlock_bh(&iucv_table_lock); + if (!smp) + iucv_nonsmp_handler--; + if (list_empty(&iucv_handler_list)) + iucv_disable(); + else if (!smp && iucv_nonsmp_handler == 0) + iucv_setmask_mp(); + mutex_unlock(&iucv_register_mutex); +} +EXPORT_SYMBOL(iucv_unregister); + +static int iucv_reboot_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + int i; + + if (cpumask_empty(&iucv_irq_cpumask)) + return NOTIFY_DONE; + + get_online_cpus(); + on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1); + preempt_disable(); + for (i = 0; i < iucv_max_pathid; i++) { + if (iucv_path_table[i]) + iucv_sever_pathid(i, NULL); + } + preempt_enable(); + put_online_cpus(); + iucv_disable(); + return NOTIFY_DONE; +} + +static struct notifier_block iucv_reboot_notifier = { + .notifier_call = iucv_reboot_event, +}; + +/** + * iucv_path_accept + * @path: address of iucv path structure + * @handler: address of iucv handler structure + * @userdata: 16 bytes of data reflected to the communication partner + * @private: private data passed to interrupt handlers for this path + * + * This function is issued after the user received a connection pending + * external interrupt and now wishes to complete the IUCV communication path. + * + * Returns the result of the CP IUCV call. + */ +int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, + u8 userdata[16], void *private) +{ + union iucv_param *parm; + int rc; + + local_bh_disable(); + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + /* Prepare parameter block. 
*/ + parm = iucv_param[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + parm->ctrl.ippathid = path->pathid; + parm->ctrl.ipmsglim = path->msglim; + if (userdata) + memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); + parm->ctrl.ipflags1 = path->flags; + + rc = iucv_call_b2f0(IUCV_ACCEPT, parm); + if (!rc) { + path->private = private; + path->msglim = parm->ctrl.ipmsglim; + path->flags = parm->ctrl.ipflags1; + } +out: + local_bh_enable(); + return rc; +} +EXPORT_SYMBOL(iucv_path_accept); + +/** + * iucv_path_connect + * @path: address of iucv path structure + * @handler: address of iucv handler structure + * @userid: 8-byte user identification + * @system: 8-byte target system identification + * @userdata: 16 bytes of data reflected to the communication partner + * @private: private data passed to interrupt handlers for this path + * + * This function establishes an IUCV path. Although the connect may complete + * successfully, you are not able to use the path until you receive an IUCV + * Connection Complete external interrupt. + * + * Returns the result of the CP IUCV call. + */ +int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, + u8 userid[8], u8 system[8], u8 userdata[16], + void *private) +{ + union iucv_param *parm; + int rc; + + spin_lock_bh(&iucv_table_lock); + iucv_cleanup_queue(); + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + parm = iucv_param[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + parm->ctrl.ipmsglim = path->msglim; + parm->ctrl.ipflags1 = path->flags; + if (userid) { + memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid)); + ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); + EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); + } + if (system) { + memcpy(parm->ctrl.iptarget, system, + sizeof(parm->ctrl.iptarget)); + ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); + EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); + } + if (userdata) + memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); + + rc = iucv_call_b2f0(IUCV_CONNECT, parm); + if (!rc) { + if (parm->ctrl.ippathid < iucv_max_pathid) { + path->pathid = parm->ctrl.ippathid; + path->msglim = parm->ctrl.ipmsglim; + path->flags = parm->ctrl.ipflags1; + path->handler = handler; + path->private = private; + list_add_tail(&path->list, &handler->paths); + iucv_path_table[path->pathid] = path; + } else { + iucv_sever_pathid(parm->ctrl.ippathid, + iucv_error_pathid); + rc = -EIO; + } + } +out: + spin_unlock_bh(&iucv_table_lock); + return rc; +} +EXPORT_SYMBOL(iucv_path_connect); + +/** + * iucv_path_quiesce: + * @path: address of iucv path structure + * @userdata: 16 bytes of data reflected to the communication partner + * + * This function temporarily suspends incoming messages on an IUCV path. + * You can later reactivate the path by invoking the iucv_resume function. + * + * Returns the result from the CP IUCV call. 
+ */ +int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]) +{ + union iucv_param *parm; + int rc; + + local_bh_disable(); + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + parm = iucv_param[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + if (userdata) + memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); + parm->ctrl.ippathid = path->pathid; + rc = iucv_call_b2f0(IUCV_QUIESCE, parm); +out: + local_bh_enable(); + return rc; +} +EXPORT_SYMBOL(iucv_path_quiesce); + +/** + * iucv_path_resume: + * @path: address of iucv path structure + * @userdata: 16 bytes of data reflected to the communication partner + * + * This function resumes incoming messages on an IUCV path that has + * been stopped with iucv_path_quiesce. + * + * Returns the result from the CP IUCV call. + */ +int iucv_path_resume(struct iucv_path *path, u8 userdata[16]) +{ + union iucv_param *parm; + int rc; + + local_bh_disable(); + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + parm = iucv_param[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + if (userdata) + memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); + parm->ctrl.ippathid = path->pathid; + rc = iucv_call_b2f0(IUCV_RESUME, parm); +out: + local_bh_enable(); + return rc; +} + +/** + * iucv_path_sever + * @path: address of iucv path structure + * @userdata: 16 bytes of data reflected to the communication partner + * + * This function terminates an IUCV path. + * + * Returns the result from the CP IUCV call. + */ +int iucv_path_sever(struct iucv_path *path, u8 userdata[16]) +{ + int rc; + + preempt_disable(); + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + if (iucv_active_cpu != smp_processor_id()) + spin_lock_bh(&iucv_table_lock); + rc = iucv_sever_pathid(path->pathid, userdata); + iucv_path_table[path->pathid] = NULL; + list_del_init(&path->list); + if (iucv_active_cpu != smp_processor_id()) + spin_unlock_bh(&iucv_table_lock); +out: + preempt_enable(); + return rc; +} +EXPORT_SYMBOL(iucv_path_sever); + +/** + * iucv_message_purge + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * @srccls: source class of message + * + * Cancels a message you have sent. + * + * Returns the result from the CP IUCV call. + */ +int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, + u32 srccls) +{ + union iucv_param *parm; + int rc; + + local_bh_disable(); + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + parm = iucv_param[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + parm->purge.ippathid = path->pathid; + parm->purge.ipmsgid = msg->id; + parm->purge.ipsrccls = srccls; + parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID; + rc = iucv_call_b2f0(IUCV_PURGE, parm); + if (!rc) { + msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; + msg->tag = parm->purge.ipmsgtag; + } +out: + local_bh_enable(); + return rc; +} +EXPORT_SYMBOL(iucv_message_purge); + +/** + * iucv_message_receive_iprmdata + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * @flags: how the message is received (IUCV_IPBUFLST) + * @buffer: address of data buffer or address of struct iucv_array + * @size: length of data buffer + * @residual: + * + * Internal function used by iucv_message_receive and __iucv_message_receive + * to receive RMDATA data stored in struct iucv_message. 
+ */ +static int iucv_message_receive_iprmdata(struct iucv_path *path, + struct iucv_message *msg, + u8 flags, void *buffer, + size_t size, size_t *residual) +{ + struct iucv_array *array; + u8 *rmmsg; + size_t copy; + + /* + * Message is 8 bytes long and has been stored to the + * message descriptor itself. + */ + if (residual) + *residual = abs(size - 8); + rmmsg = msg->rmmsg; + if (flags & IUCV_IPBUFLST) { + /* Copy to struct iucv_array. */ + size = (size < 8) ? size : 8; + for (array = buffer; size > 0; array++) { + copy = min_t(size_t, size, array->length); + memcpy((u8 *)(addr_t) array->address, + rmmsg, copy); + rmmsg += copy; + size -= copy; + } + } else { + /* Copy to direct buffer. */ + memcpy(buffer, rmmsg, min_t(size_t, size, 8)); + } + return 0; +} + +/** + * __iucv_message_receive + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * @flags: how the message is received (IUCV_IPBUFLST) + * @buffer: address of data buffer or address of struct iucv_array + * @size: length of data buffer + * @residual: + * + * This function receives messages that are being sent to you over + * established paths. This function will deal with RMDATA messages + * embedded in struct iucv_message as well. + * + * Locking: no locking + * + * Returns the result from the CP IUCV call. + */ +int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, + u8 flags, void *buffer, size_t size, size_t *residual) +{ + union iucv_param *parm; + int rc; + + if (msg->flags & IUCV_IPRMDATA) + return iucv_message_receive_iprmdata(path, msg, flags, + buffer, size, residual); + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + parm = iucv_param[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + parm->db.ipbfadr1 = (u32)(addr_t) buffer; + parm->db.ipbfln1f = (u32) size; + parm->db.ipmsgid = msg->id; + parm->db.ippathid = path->pathid; + parm->db.iptrgcls = msg->class; + parm->db.ipflags1 = (flags | IUCV_IPFGPID | + IUCV_IPFGMID | IUCV_IPTRGCLS); + rc = iucv_call_b2f0(IUCV_RECEIVE, parm); + if (!rc || rc == 5) { + msg->flags = parm->db.ipflags1; + if (residual) + *residual = parm->db.ipbfln1f; + } +out: + return rc; +} +EXPORT_SYMBOL(__iucv_message_receive); + +/** + * iucv_message_receive + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * @flags: how the message is received (IUCV_IPBUFLST) + * @buffer: address of data buffer or address of struct iucv_array + * @size: length of data buffer + * @residual: + * + * This function receives messages that are being sent to you over + * established paths. This function will deal with RMDATA messages + * embedded in struct iucv_message as well. + * + * Locking: local_bh_enable/local_bh_disable + * + * Returns the result from the CP IUCV call. + */ +int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, + u8 flags, void *buffer, size_t size, size_t *residual) +{ + int rc; + + if (msg->flags & IUCV_IPRMDATA) + return iucv_message_receive_iprmdata(path, msg, flags, + buffer, size, residual); + local_bh_disable(); + rc = __iucv_message_receive(path, msg, flags, buffer, size, residual); + local_bh_enable(); + return rc; +} +EXPORT_SYMBOL(iucv_message_receive); + +/** + * iucv_message_reject + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * + * The reject function refuses a specified message. 
Between the time you + * are notified of a message and the time that you complete the message, + * the message may be rejected. + * + * Returns the result from the CP IUCV call. + */ +int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) +{ + union iucv_param *parm; + int rc; + + local_bh_disable(); + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + parm = iucv_param[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + parm->db.ippathid = path->pathid; + parm->db.ipmsgid = msg->id; + parm->db.iptrgcls = msg->class; + parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID); + rc = iucv_call_b2f0(IUCV_REJECT, parm); +out: + local_bh_enable(); + return rc; +} +EXPORT_SYMBOL(iucv_message_reject); + +/** + * iucv_message_reply + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) + * @reply: address of reply data buffer or address of struct iucv_array + * @size: length of reply data buffer + * + * This function responds to the two-way messages that you receive. You + * must identify completely the message to which you wish to reply. ie, + * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into + * the parameter list. + * + * Returns the result from the CP IUCV call. + */ +int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, + u8 flags, void *reply, size_t size) +{ + union iucv_param *parm; + int rc; + + local_bh_disable(); + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + parm = iucv_param[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + if (flags & IUCV_IPRMDATA) { + parm->dpl.ippathid = path->pathid; + parm->dpl.ipflags1 = flags; + parm->dpl.ipmsgid = msg->id; + parm->dpl.iptrgcls = msg->class; + memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8)); + } else { + parm->db.ipbfadr1 = (u32)(addr_t) reply; + parm->db.ipbfln1f = (u32) size; + parm->db.ippathid = path->pathid; + parm->db.ipflags1 = flags; + parm->db.ipmsgid = msg->id; + parm->db.iptrgcls = msg->class; + } + rc = iucv_call_b2f0(IUCV_REPLY, parm); +out: + local_bh_enable(); + return rc; +} +EXPORT_SYMBOL(iucv_message_reply); + +/** + * __iucv_message_send + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) + * @srccls: source class of message + * @buffer: address of send buffer or address of struct iucv_array + * @size: length of send buffer + * + * This function transmits data to another application. Data to be + * transmitted is in a buffer and this is a one-way message and the + * receiver will not reply to the message. + * + * Locking: no locking + * + * Returns the result from the CP IUCV call. + */ +int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, + u8 flags, u32 srccls, void *buffer, size_t size) +{ + union iucv_param *parm; + int rc; + + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + parm = iucv_param[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + if (flags & IUCV_IPRMDATA) { + /* Message of 8 bytes can be placed into the parameter list. 
*/ + parm->dpl.ippathid = path->pathid; + parm->dpl.ipflags1 = flags | IUCV_IPNORPY; + parm->dpl.iptrgcls = msg->class; + parm->dpl.ipsrccls = srccls; + parm->dpl.ipmsgtag = msg->tag; + memcpy(parm->dpl.iprmmsg, buffer, 8); + } else { + parm->db.ipbfadr1 = (u32)(addr_t) buffer; + parm->db.ipbfln1f = (u32) size; + parm->db.ippathid = path->pathid; + parm->db.ipflags1 = flags | IUCV_IPNORPY; + parm->db.iptrgcls = msg->class; + parm->db.ipsrccls = srccls; + parm->db.ipmsgtag = msg->tag; + } + rc = iucv_call_b2f0(IUCV_SEND, parm); + if (!rc) + msg->id = parm->db.ipmsgid; +out: + return rc; +} +EXPORT_SYMBOL(__iucv_message_send); + +/** + * iucv_message_send + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) + * @srccls: source class of message + * @buffer: address of send buffer or address of struct iucv_array + * @size: length of send buffer + * + * This function transmits data to another application. Data to be + * transmitted is in a buffer and this is a one-way message and the + * receiver will not reply to the message. + * + * Locking: local_bh_enable/local_bh_disable + * + * Returns the result from the CP IUCV call. + */ +int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, + u8 flags, u32 srccls, void *buffer, size_t size) +{ + int rc; + + local_bh_disable(); + rc = __iucv_message_send(path, msg, flags, srccls, buffer, size); + local_bh_enable(); + return rc; +} +EXPORT_SYMBOL(iucv_message_send); + +/** + * iucv_message_send2way + * @path: address of iucv path structure + * @msg: address of iucv msg structure + * @flags: how the message is sent and the reply is received + * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST) + * @srccls: source class of message + * @buffer: address of send buffer or address of struct iucv_array + * @size: length of send buffer + * @ansbuf: address of answer buffer or address of struct iucv_array + * @asize: size of reply buffer + * + * This function transmits data to another application. Data to be + * transmitted is in a buffer. The receiver of the send is expected to + * reply to the message and a buffer is provided into which IUCV moves + * the reply to this message. + * + * Returns the result from the CP IUCV call. 
+ */ +int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, + u8 flags, u32 srccls, void *buffer, size_t size, + void *answer, size_t asize, size_t *residual) +{ + union iucv_param *parm; + int rc; + + local_bh_disable(); + if (cpumask_empty(&iucv_buffer_cpumask)) { + rc = -EIO; + goto out; + } + parm = iucv_param[smp_processor_id()]; + memset(parm, 0, sizeof(union iucv_param)); + if (flags & IUCV_IPRMDATA) { + parm->dpl.ippathid = path->pathid; + parm->dpl.ipflags1 = path->flags; /* priority message */ + parm->dpl.iptrgcls = msg->class; + parm->dpl.ipsrccls = srccls; + parm->dpl.ipmsgtag = msg->tag; + parm->dpl.ipbfadr2 = (u32)(addr_t) answer; + parm->dpl.ipbfln2f = (u32) asize; + memcpy(parm->dpl.iprmmsg, buffer, 8); + } else { + parm->db.ippathid = path->pathid; + parm->db.ipflags1 = path->flags; /* priority message */ + parm->db.iptrgcls = msg->class; + parm->db.ipsrccls = srccls; + parm->db.ipmsgtag = msg->tag; + parm->db.ipbfadr1 = (u32)(addr_t) buffer; + parm->db.ipbfln1f = (u32) size; + parm->db.ipbfadr2 = (u32)(addr_t) answer; + parm->db.ipbfln2f = (u32) asize; + } + rc = iucv_call_b2f0(IUCV_SEND, parm); + if (!rc) + msg->id = parm->db.ipmsgid; +out: + local_bh_enable(); + return rc; +} +EXPORT_SYMBOL(iucv_message_send2way); + +/** + * iucv_path_pending + * @data: Pointer to external interrupt buffer + * + * Process connection pending work item. Called from tasklet while holding + * iucv_table_lock. + */ +struct iucv_path_pending { + u16 ippathid; + u8 ipflags1; + u8 iptype; + u16 ipmsglim; + u16 res1; + u8 ipvmid[8]; + u8 ipuser[16]; + u32 res3; + u8 ippollfg; + u8 res4[3]; +} __packed; + +static void iucv_path_pending(struct iucv_irq_data *data) +{ + struct iucv_path_pending *ipp = (void *) data; + struct iucv_handler *handler; + struct iucv_path *path; + char *error; + + BUG_ON(iucv_path_table[ipp->ippathid]); + /* New pathid, handler found. Create a new path struct. */ + error = iucv_error_no_memory; + path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC); + if (!path) + goto out_sever; + path->pathid = ipp->ippathid; + iucv_path_table[path->pathid] = path; + EBCASC(ipp->ipvmid, 8); + + /* Call registered handler until one is found that wants the path. */ + list_for_each_entry(handler, &iucv_handler_list, list) { + if (!handler->path_pending) + continue; + /* + * Add path to handler to allow a call to iucv_path_sever + * inside the path_pending function. If the handler returns + * an error remove the path from the handler again. + */ + list_add(&path->list, &handler->paths); + path->handler = handler; + if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser)) + return; + list_del(&path->list); + path->handler = NULL; + } + /* No handler wanted the path. */ + iucv_path_table[path->pathid] = NULL; + iucv_path_free(path); + error = iucv_error_no_listener; +out_sever: + iucv_sever_pathid(ipp->ippathid, error); +} + +/** + * iucv_path_complete + * @data: Pointer to external interrupt buffer + * + * Process connection complete work item. Called from tasklet while holding + * iucv_table_lock. 
+ */ +struct iucv_path_complete { + u16 ippathid; + u8 ipflags1; + u8 iptype; + u16 ipmsglim; + u16 res1; + u8 res2[8]; + u8 ipuser[16]; + u32 res3; + u8 ippollfg; + u8 res4[3]; +} __packed; + +static void iucv_path_complete(struct iucv_irq_data *data) +{ + struct iucv_path_complete *ipc = (void *) data; + struct iucv_path *path = iucv_path_table[ipc->ippathid]; + + if (path) + path->flags = ipc->ipflags1; + if (path && path->handler && path->handler->path_complete) + path->handler->path_complete(path, ipc->ipuser); +} + +/** + * iucv_path_severed + * @data: Pointer to external interrupt buffer + * + * Process connection severed work item. Called from tasklet while holding + * iucv_table_lock. + */ +struct iucv_path_severed { + u16 ippathid; + u8 res1; + u8 iptype; + u32 res2; + u8 res3[8]; + u8 ipuser[16]; + u32 res4; + u8 ippollfg; + u8 res5[3]; +} __packed; + +static void iucv_path_severed(struct iucv_irq_data *data) +{ + struct iucv_path_severed *ips = (void *) data; + struct iucv_path *path = iucv_path_table[ips->ippathid]; + + if (!path || !path->handler) /* Already severed */ + return; + if (path->handler->path_severed) + path->handler->path_severed(path, ips->ipuser); + else { + iucv_sever_pathid(path->pathid, NULL); + iucv_path_table[path->pathid] = NULL; + list_del(&path->list); + iucv_path_free(path); + } +} + +/** + * iucv_path_quiesced + * @data: Pointer to external interrupt buffer + * + * Process connection quiesced work item. Called from tasklet while holding + * iucv_table_lock. + */ +struct iucv_path_quiesced { + u16 ippathid; + u8 res1; + u8 iptype; + u32 res2; + u8 res3[8]; + u8 ipuser[16]; + u32 res4; + u8 ippollfg; + u8 res5[3]; +} __packed; + +static void iucv_path_quiesced(struct iucv_irq_data *data) +{ + struct iucv_path_quiesced *ipq = (void *) data; + struct iucv_path *path = iucv_path_table[ipq->ippathid]; + + if (path && path->handler && path->handler->path_quiesced) + path->handler->path_quiesced(path, ipq->ipuser); +} + +/** + * iucv_path_resumed + * @data: Pointer to external interrupt buffer + * + * Process connection resumed work item. Called from tasklet while holding + * iucv_table_lock. + */ +struct iucv_path_resumed { + u16 ippathid; + u8 res1; + u8 iptype; + u32 res2; + u8 res3[8]; + u8 ipuser[16]; + u32 res4; + u8 ippollfg; + u8 res5[3]; +} __packed; + +static void iucv_path_resumed(struct iucv_irq_data *data) +{ + struct iucv_path_resumed *ipr = (void *) data; + struct iucv_path *path = iucv_path_table[ipr->ippathid]; + + if (path && path->handler && path->handler->path_resumed) + path->handler->path_resumed(path, ipr->ipuser); +} + +/** + * iucv_message_complete + * @data: Pointer to external interrupt buffer + * + * Process message complete work item. Called from tasklet while holding + * iucv_table_lock. 
+ */ +struct iucv_message_complete { + u16 ippathid; + u8 ipflags1; + u8 iptype; + u32 ipmsgid; + u32 ipaudit; + u8 iprmmsg[8]; + u32 ipsrccls; + u32 ipmsgtag; + u32 res; + u32 ipbfln2f; + u8 ippollfg; + u8 res2[3]; +} __packed; + +static void iucv_message_complete(struct iucv_irq_data *data) +{ + struct iucv_message_complete *imc = (void *) data; + struct iucv_path *path = iucv_path_table[imc->ippathid]; + struct iucv_message msg; + + if (path && path->handler && path->handler->message_complete) { + msg.flags = imc->ipflags1; + msg.id = imc->ipmsgid; + msg.audit = imc->ipaudit; + memcpy(msg.rmmsg, imc->iprmmsg, 8); + msg.class = imc->ipsrccls; + msg.tag = imc->ipmsgtag; + msg.length = imc->ipbfln2f; + path->handler->message_complete(path, &msg); + } +} + +/** + * iucv_message_pending + * @data: Pointer to external interrupt buffer + * + * Process message pending work item. Called from tasklet while holding + * iucv_table_lock. + */ +struct iucv_message_pending { + u16 ippathid; + u8 ipflags1; + u8 iptype; + u32 ipmsgid; + u32 iptrgcls; + union { + u32 iprmmsg1_u32; + u8 iprmmsg1[4]; + } ln1msg1; + union { + u32 ipbfln1f; + u8 iprmmsg2[4]; + } ln1msg2; + u32 res1[3]; + u32 ipbfln2f; + u8 ippollfg; + u8 res2[3]; +} __packed; + +static void iucv_message_pending(struct iucv_irq_data *data) +{ + struct iucv_message_pending *imp = (void *) data; + struct iucv_path *path = iucv_path_table[imp->ippathid]; + struct iucv_message msg; + + if (path && path->handler && path->handler->message_pending) { + msg.flags = imp->ipflags1; + msg.id = imp->ipmsgid; + msg.class = imp->iptrgcls; + if (imp->ipflags1 & IUCV_IPRMDATA) { + memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); + msg.length = 8; + } else + msg.length = imp->ln1msg2.ipbfln1f; + msg.reply_size = imp->ipbfln2f; + path->handler->message_pending(path, &msg); + } +} + +/** + * iucv_tasklet_fn: + * + * This tasklet loops over the queue of irq buffers created by + * iucv_external_interrupt, calls the appropriate action handler + * and then frees the buffer. + */ +static void iucv_tasklet_fn(unsigned long ignored) +{ + typedef void iucv_irq_fn(struct iucv_irq_data *); + static iucv_irq_fn *irq_fn[] = { + [0x02] = iucv_path_complete, + [0x03] = iucv_path_severed, + [0x04] = iucv_path_quiesced, + [0x05] = iucv_path_resumed, + [0x06] = iucv_message_complete, + [0x07] = iucv_message_complete, + [0x08] = iucv_message_pending, + [0x09] = iucv_message_pending, + }; + LIST_HEAD(task_queue); + struct iucv_irq_list *p, *n; + + /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ + if (!spin_trylock(&iucv_table_lock)) { + tasklet_schedule(&iucv_tasklet); + return; + } + iucv_active_cpu = smp_processor_id(); + + spin_lock_irq(&iucv_queue_lock); + list_splice_init(&iucv_task_queue, &task_queue); + spin_unlock_irq(&iucv_queue_lock); + + list_for_each_entry_safe(p, n, &task_queue, list) { + list_del_init(&p->list); + irq_fn[p->data.iptype](&p->data); + kfree(p); + } + + iucv_active_cpu = -1; + spin_unlock(&iucv_table_lock); +} + +/** + * iucv_work_fn: + * + * This work function loops over the queue of path pending irq blocks + * created by iucv_external_interrupt, calls the appropriate action + * handler and then frees the buffer. + */ +static void iucv_work_fn(struct work_struct *work) +{ + LIST_HEAD(work_queue); + struct iucv_irq_list *p, *n; + + /* Serialize tasklet, iucv_path_sever and iucv_path_connect. 
*/ + spin_lock_bh(&iucv_table_lock); + iucv_active_cpu = smp_processor_id(); + + spin_lock_irq(&iucv_queue_lock); + list_splice_init(&iucv_work_queue, &work_queue); + spin_unlock_irq(&iucv_queue_lock); + + iucv_cleanup_queue(); + list_for_each_entry_safe(p, n, &work_queue, list) { + list_del_init(&p->list); + iucv_path_pending(&p->data); + kfree(p); + } + + iucv_active_cpu = -1; + spin_unlock_bh(&iucv_table_lock); +} + +/** + * iucv_external_interrupt + * @code: irq code + * + * Handles external interrupts coming in from CP. + * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). + */ +static void iucv_external_interrupt(struct ext_code ext_code, + unsigned int param32, unsigned long param64) +{ + struct iucv_irq_data *p; + struct iucv_irq_list *work; + + inc_irq_stat(IRQEXT_IUC); + p = iucv_irq_data[smp_processor_id()]; + if (p->ippathid >= iucv_max_pathid) { + WARN_ON(p->ippathid >= iucv_max_pathid); + iucv_sever_pathid(p->ippathid, iucv_error_no_listener); + return; + } + BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); + work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); + if (!work) { + pr_warn("iucv_external_interrupt: out of memory\n"); + return; + } + memcpy(&work->data, p, sizeof(work->data)); + spin_lock(&iucv_queue_lock); + if (p->iptype == 0x01) { + /* Path pending interrupt. */ + list_add_tail(&work->list, &iucv_work_queue); + schedule_work(&iucv_work); + } else { + /* The other interrupts. */ + list_add_tail(&work->list, &iucv_task_queue); + tasklet_schedule(&iucv_tasklet); + } + spin_unlock(&iucv_queue_lock); +} + +static int iucv_pm_prepare(struct device *dev) +{ + int rc = 0; + +#ifdef CONFIG_PM_DEBUG + printk(KERN_INFO "iucv_pm_prepare\n"); +#endif + if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) + rc = dev->driver->pm->prepare(dev); + return rc; +} + +static void iucv_pm_complete(struct device *dev) +{ +#ifdef CONFIG_PM_DEBUG + printk(KERN_INFO "iucv_pm_complete\n"); +#endif + if (dev->driver && dev->driver->pm && dev->driver->pm->complete) + dev->driver->pm->complete(dev); +} + +/** + * iucv_path_table_empty() - determine if iucv path table is empty + * + * Returns 0 if there are still iucv pathes defined + * 1 if there are no iucv pathes defined + */ +int iucv_path_table_empty(void) +{ + int i; + + for (i = 0; i < iucv_max_pathid; i++) { + if (iucv_path_table[i]) + return 0; + } + return 1; +} + +/** + * iucv_pm_freeze() - Freeze PM callback + * @dev: iucv-based device + * + * disable iucv interrupts + * invoke callback function of the iucv-based driver + * shut down iucv, if no iucv-pathes are established anymore + */ +static int iucv_pm_freeze(struct device *dev) +{ + int cpu; + struct iucv_irq_list *p, *n; + int rc = 0; + +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "iucv_pm_freeze\n"); +#endif + if (iucv_pm_state != IUCV_PM_FREEZING) { + for_each_cpu(cpu, &iucv_irq_cpumask) + smp_call_function_single(cpu, iucv_block_cpu_almost, + NULL, 1); + cancel_work_sync(&iucv_work); + list_for_each_entry_safe(p, n, &iucv_work_queue, list) { + list_del_init(&p->list); + iucv_sever_pathid(p->data.ippathid, + iucv_error_no_listener); + kfree(p); + } + } + iucv_pm_state = IUCV_PM_FREEZING; + if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) + rc = dev->driver->pm->freeze(dev); + if (iucv_path_table_empty()) + iucv_disable(); + return rc; +} + +/** + * iucv_pm_thaw() - Thaw PM callback + * @dev: iucv-based device + * + * make iucv ready for use again: allocate path table, declare interrupt buffers + * and enable iucv interrupts 
+ * invoke callback function of the iucv-based driver + */ +static int iucv_pm_thaw(struct device *dev) +{ + int rc = 0; + +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "iucv_pm_thaw\n"); +#endif + iucv_pm_state = IUCV_PM_THAWING; + if (!iucv_path_table) { + rc = iucv_enable(); + if (rc) + goto out; + } + if (cpumask_empty(&iucv_irq_cpumask)) { + if (iucv_nonsmp_handler) + /* enable interrupts on one cpu */ + iucv_allow_cpu(NULL); + else + /* enable interrupts on all cpus */ + iucv_setmask_mp(); + } + if (dev->driver && dev->driver->pm && dev->driver->pm->thaw) + rc = dev->driver->pm->thaw(dev); +out: + return rc; +} + +/** + * iucv_pm_restore() - Restore PM callback + * @dev: iucv-based device + * + * make iucv ready for use again: allocate path table, declare interrupt buffers + * and enable iucv interrupts + * invoke callback function of the iucv-based driver + */ +static int iucv_pm_restore(struct device *dev) +{ + int rc = 0; + +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); +#endif + if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table) + pr_warn("Suspending Linux did not completely close all IUCV connections\n"); + iucv_pm_state = IUCV_PM_RESTORING; + if (cpumask_empty(&iucv_irq_cpumask)) { + rc = iucv_query_maxconn(); + rc = iucv_enable(); + if (rc) + goto out; + } + if (dev->driver && dev->driver->pm && dev->driver->pm->restore) + rc = dev->driver->pm->restore(dev); +out: + return rc; +} + +struct iucv_interface iucv_if = { + .message_receive = iucv_message_receive, + .__message_receive = __iucv_message_receive, + .message_reply = iucv_message_reply, + .message_reject = iucv_message_reject, + .message_send = iucv_message_send, + .__message_send = __iucv_message_send, + .message_send2way = iucv_message_send2way, + .message_purge = iucv_message_purge, + .path_accept = iucv_path_accept, + .path_connect = iucv_path_connect, + .path_quiesce = iucv_path_quiesce, + .path_resume = iucv_path_resume, + .path_sever = iucv_path_sever, + .iucv_register = iucv_register, + .iucv_unregister = iucv_unregister, + .bus = NULL, + .root = NULL, +}; +EXPORT_SYMBOL(iucv_if); + +/** + * iucv_init + * + * Allocates and initializes various data structures. 
+ */ +static int __init iucv_init(void) +{ + int rc; + int cpu; + + if (!MACHINE_IS_VM) { + rc = -EPROTONOSUPPORT; + goto out; + } + ctl_set_bit(0, 1); + rc = iucv_query_maxconn(); + if (rc) + goto out_ctl; + rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); + if (rc) + goto out_ctl; + iucv_root = root_device_register("iucv"); + if (IS_ERR(iucv_root)) { + rc = PTR_ERR(iucv_root); + goto out_int; + } + + cpu_notifier_register_begin(); + + for_each_online_cpu(cpu) { + if (alloc_iucv_data(cpu)) { + rc = -ENOMEM; + goto out_free; + } + } + rc = __register_hotcpu_notifier(&iucv_cpu_notifier); + if (rc) + goto out_free; + + cpu_notifier_register_done(); + + rc = register_reboot_notifier(&iucv_reboot_notifier); + if (rc) + goto out_cpu; + ASCEBC(iucv_error_no_listener, 16); + ASCEBC(iucv_error_no_memory, 16); + ASCEBC(iucv_error_pathid, 16); + iucv_available = 1; + rc = bus_register(&iucv_bus); + if (rc) + goto out_reboot; + iucv_if.root = iucv_root; + iucv_if.bus = &iucv_bus; + return 0; + +out_reboot: + unregister_reboot_notifier(&iucv_reboot_notifier); +out_cpu: + cpu_notifier_register_begin(); + __unregister_hotcpu_notifier(&iucv_cpu_notifier); +out_free: + for_each_possible_cpu(cpu) + free_iucv_data(cpu); + + cpu_notifier_register_done(); + + root_device_unregister(iucv_root); +out_int: + unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); +out_ctl: + ctl_clear_bit(0, 1); +out: + return rc; +} + +/** + * iucv_exit + * + * Frees everything allocated from iucv_init. + */ +static void __exit iucv_exit(void) +{ + struct iucv_irq_list *p, *n; + int cpu; + + spin_lock_irq(&iucv_queue_lock); + list_for_each_entry_safe(p, n, &iucv_task_queue, list) + kfree(p); + list_for_each_entry_safe(p, n, &iucv_work_queue, list) + kfree(p); + spin_unlock_irq(&iucv_queue_lock); + unregister_reboot_notifier(&iucv_reboot_notifier); + cpu_notifier_register_begin(); + __unregister_hotcpu_notifier(&iucv_cpu_notifier); + for_each_possible_cpu(cpu) + free_iucv_data(cpu); + cpu_notifier_register_done(); + root_device_unregister(iucv_root); + bus_unregister(&iucv_bus); + unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); +} + +subsys_initcall(iucv_init); +module_exit(iucv_exit); + +MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); +MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); +MODULE_LICENSE("GPL"); |
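
Editor's illustration, not part of the patch above: the iucv.c listing defines the in-kernel API that af_iucv.c and other exploiters build on, namely iucv_register(), iucv_path_connect(), iucv_message_send()/iucv_message_receive() and the handler callbacks. The sketch below shows how a hypothetical kernel module might use that API. The handler and function names, the partner guest "LNXPEER", the 16 bytes of user data, the message tag and the buffer sizes are all made-up placeholders, error handling is abbreviated, and the callback signatures are inferred from the calls visible in the listing.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>

static struct iucv_path *demo_path;

/* The path is usable once the connection-complete callback fires; send then. */
static void demo_path_complete(struct iucv_path *path, u8 *ipuser)
{
	static char greeting[] = "hello via IUCV";
	struct iucv_message msg = { .class = 0, .tag = 1 };
	int rc;

	rc = iucv_message_send(path, &msg, 0, 0, greeting, sizeof(greeting));
	if (rc)
		pr_warn("iucv demo: send failed, rc %d\n", rc);
}

/* Completion interrupt for a message we sent. */
static void demo_message_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	pr_info("iucv demo: message %u complete\n", msg->id);
}

/* A message from the peer is pending; pull it into a local buffer. */
static void demo_message_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	char buf[64];

	if (!iucv_message_receive(path, msg, 0, buf,
				  min_t(size_t, msg->length, sizeof(buf)),
				  NULL))
		pr_info("iucv demo: received %u bytes\n", msg->length);
}

/* Peer or CP severed the path; a real exploiter would clean up here. */
static void demo_path_severed(struct iucv_path *path, u8 *ipuser)
{
	pr_info("iucv demo: path %u severed\n", path->pathid);
}

static struct iucv_handler demo_handler = {
	.path_complete	  = demo_path_complete,
	.path_severed	  = demo_path_severed,
	.message_pending  = demo_message_pending,
	.message_complete = demo_message_complete,
};

static int __init demo_init(void)
{
	static u8 peer[] = "LNXPEER ";		/* hypothetical partner guest */
	static u8 udata[] = "DEMO            ";	/* 16 bytes of user data */
	int rc;

	rc = iucv_register(&demo_handler, 0);
	if (rc)
		return rc;

	demo_path = iucv_path_alloc(10 /* msglim */, 0 /* flags */, GFP_KERNEL);
	if (!demo_path) {
		iucv_unregister(&demo_handler, 0);
		return -ENOMEM;
	}

	/* Completes asynchronously via demo_path_complete(). */
	rc = iucv_path_connect(demo_path, &demo_handler, peer, NULL,
			       udata, NULL);
	if (rc) {	/* -EIO or a positive CP IUCV return code */
		iucv_path_free(demo_path);
		demo_path = NULL;
		iucv_unregister(&demo_handler, 0);
		return -EIO;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	if (demo_path) {
		iucv_path_sever(demo_path, NULL);
		iucv_path_free(demo_path);
	}
	iucv_unregister(&demo_handler, 0);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");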
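
A second illustration, also not part of the patch: the user-space view that af_iucv.c provides once the AF_IUCV socket family is registered. The partner user ID "LNXPEER" and application name "DEMOAPP" are placeholders, and struct sockaddr_iucv is reproduced here under the assumption that it matches the declaration in include/net/iucv/af_iucv.h (most C libraries do not ship it); verify both the address-family constant and the structure layout against your own headers before relying on this sketch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_IUCV
#define AF_IUCV 32		/* value from <linux/socket.h>; verify locally */
#endif

/* Assumed layout, copied from include/net/iucv/af_iucv.h. */
struct sockaddr_iucv {
	sa_family_t	siucv_family;
	unsigned short	siucv_port;		/* reserved, must be 0 */
	unsigned int	siucv_addr;		/* reserved, must be 0 */
	char		siucv_nodeid[8];	/* reserved, blank padded */
	char		siucv_user_id[8];	/* z/VM user ID, blank padded */
	char		siucv_name[8];		/* application name, blank padded */
};

int main(void)
{
	struct sockaddr_iucv peer;
	char buf[] = "hello over AF_IUCV";
	int fd;

	fd = socket(AF_IUCV, SOCK_STREAM, 0);
	if (fd < 0) {
		perror("socket(AF_IUCV)");
		return 1;
	}

	memset(&peer, 0, sizeof(peer));
	peer.siucv_family = AF_IUCV;
	memcpy(peer.siucv_nodeid,  "        ", 8);
	memcpy(peer.siucv_user_id, "LNXPEER ", 8);	/* partner guest */
	memcpy(peer.siucv_name,    "DEMOAPP ", 8);	/* partner application */

	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}
	if (write(fd, buf, sizeof(buf)) < 0)
		perror("write");
	close(fd);
	return 0;
}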