author    | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-06-10 05:30:17 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-06-10 05:30:17 -0300
commit    | d635711daa98be86d4c7fd01499c34f566b54ccb (patch)
tree      | aa5cc3760a27c3d57146498cb82fa549547de06c /drivers/net/xen-netback
parent    | c91265cd0efb83778f015b4d4b1129bd2cfd075e (diff)
Linux-libre 4.6.2-gnu
Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r-- | drivers/net/xen-netback/common.h  |  2
-rw-r--r-- | drivers/net/xen-netback/netback.c | 66
-rw-r--r-- | drivers/net/xen-netback/xenbus.c  | 91
3 files changed, 126 insertions(+), 33 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0333ab0fd..f44b38846 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -52,6 +52,7 @@ typedef unsigned int pending_ring_idx_t;
 
 struct pending_tx_info {
 	struct xen_netif_tx_request req; /* tx request */
+	unsigned int extra_count;
 	/* Callback data for released SKBs. The callback is always
 	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
 	 * also an index in pending_tx_info array. It is initialized in
@@ -251,6 +252,7 @@ struct xenvif {
 	unsigned int stalled_queues;
 
 	struct xenbus_watch credit_watch;
+	struct xenbus_watch mcast_ctrl_watch;
 
 	spinlock_t lock;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 61b97c34b..4412a57ec 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -95,6 +95,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
+			     unsigned int extra_count,
 			     s8       st);
 static void push_tx_responses(struct xenvif_queue *queue);
 
@@ -696,19 +697,21 @@ void xenvif_tx_credit_callback(unsigned long data)
 }
 
 static void xenvif_tx_err(struct xenvif_queue *queue,
-			  struct xen_netif_tx_request *txp, RING_IDX end)
+			  struct xen_netif_tx_request *txp,
+			  unsigned int extra_count, RING_IDX end)
 {
 	RING_IDX cons = queue->tx.req_cons;
 	unsigned long flags;
 
 	do {
 		spin_lock_irqsave(&queue->response_lock, flags);
-		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
 		push_tx_responses(queue);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
 		if (cons == end)
 			break;
 		RING_COPY_REQUEST(&queue->tx, cons++, txp);
+		extra_count = 0; /* only the first frag can have extras */
 	} while (1);
 	queue->tx.req_cons = cons;
 }
@@ -724,6 +727,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
 
 static int xenvif_count_requests(struct xenvif_queue *queue,
 				 struct xen_netif_tx_request *first,
+				 unsigned int extra_count,
 				 struct xen_netif_tx_request *txp,
 				 int work_to_do)
 {
@@ -812,7 +816,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 	} while (more_data);
 
 	if (drop_err) {
-		xenvif_tx_err(queue, first, cons + slots);
+		xenvif_tx_err(queue, first, extra_count, cons + slots);
 		return drop_err;
 	}
 
@@ -827,9 +831,10 @@ struct xenvif_tx_cb {
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
-					  u16 pending_idx,
-					  struct xen_netif_tx_request *txp,
-					  struct gnttab_map_grant_ref *mop)
+					   u16 pending_idx,
+					   struct xen_netif_tx_request *txp,
+					   unsigned int extra_count,
+					   struct gnttab_map_grant_ref *mop)
 {
 	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
 	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
@@ -838,6 +843,7 @@ static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 
 	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
 	       sizeof(*txp));
+	queue->pending_tx_info[pending_idx].extra_count = extra_count;
 }
 
 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
@@ -880,7 +886,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
 	     shinfo->nr_frags++, txp++, gop++) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
 	}
 
@@ -893,7 +899,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
 	     shinfo->nr_frags++, txp++, gop++) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+		xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
+					gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags],
 				     pending_idx);
 	}
@@ -1095,8 +1102,9 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
 }
 
 static int xenvif_get_extras(struct xenvif_queue *queue,
-				struct xen_netif_extra_info *extras,
-				int work_to_do)
+			     struct xen_netif_extra_info *extras,
+			     unsigned int *extra_count,
+			     int work_to_do)
 {
 	struct xen_netif_extra_info extra;
 	RING_IDX cons = queue->tx.req_cons;
@@ -1109,9 +1117,12 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
 		}
 
 		RING_COPY_REQUEST(&queue->tx, cons, &extra);
+
+		queue->tx.req_cons = ++cons;
+		(*extra_count)++;
+
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
-			queue->tx.req_cons = ++cons;
 			netdev_err(queue->vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
 			xenvif_fatal_tx_err(queue->vif);
@@ -1119,7 +1130,6 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
 		}
 
 		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
-		queue->tx.req_cons = ++cons;
 	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
 	return work_to_do;
@@ -1294,6 +1304,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+		unsigned int extra_count;
 		u16 pending_idx;
 		RING_IDX idx;
 		int work_to_do;
@@ -1330,8 +1341,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		queue->tx.req_cons = ++idx;
 
 		memset(extras, 0, sizeof(extras));
+		extra_count = 0;
 		if (txreq.flags & XEN_NETTXF_extra_info) {
 			work_to_do = xenvif_get_extras(queue, extras,
+						       &extra_count,
 						       work_to_do);
 			idx = queue->tx.req_cons;
 			if (unlikely(work_to_do < 0))
@@ -1344,7 +1357,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
 			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
 
-			make_tx_response(queue, &txreq,
+			make_tx_response(queue, &txreq, extra_count,
 					 (ret == 0) ?
 					 XEN_NETIF_RSP_OKAY :
 					 XEN_NETIF_RSP_ERROR);
@@ -1358,12 +1371,14 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
 			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
 
-			make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+			make_tx_response(queue, &txreq, extra_count,
+					 XEN_NETIF_RSP_OKAY);
 			push_tx_responses(queue);
 			continue;
 		}
 
-		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
+		ret = xenvif_count_requests(queue, &txreq, extra_count,
+					    txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			break;
 
@@ -1372,7 +1387,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		if (unlikely(txreq.size < ETH_HLEN)) {
 			netdev_dbg(queue->vif->dev,
 				   "Bad packet size: %d\n", txreq.size);
-			xenvif_tx_err(queue, &txreq, idx);
+			xenvif_tx_err(queue, &txreq, extra_count, idx);
 			break;
 		}
 
@@ -1397,7 +1412,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		if (unlikely(skb == NULL)) {
 			netdev_dbg(queue->vif->dev,
 				   "Can't allocate a skb in start_xmit.\n");
-			xenvif_tx_err(queue, &txreq, idx);
+			xenvif_tx_err(queue, &txreq, extra_count, idx);
 			break;
 		}
 
@@ -1416,7 +1431,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			nskb = xenvif_alloc_skb(0);
 			if (unlikely(nskb == NULL)) {
 				kfree_skb(skb);
-				xenvif_tx_err(queue, &txreq, idx);
+				xenvif_tx_err(queue, &txreq, extra_count, idx);
 				if (net_ratelimit())
 					netdev_err(queue->vif->dev,
 						   "Can't allocate the frag_list skb.\n");
@@ -1457,13 +1472,16 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		if (data_len < txreq.size) {
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     pending_idx);
-			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
+			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
+						extra_count, gop);
 			gop++;
 		} else {
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     INVALID_PENDING_IDX);
-			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
-			       sizeof(txreq));
+			memcpy(&queue->pending_tx_info[pending_idx].req,
+			       &txreq, sizeof(txreq));
+			queue->pending_tx_info[pending_idx].extra_count =
+				extra_count;
 		}
 
 		queue->pending_cons++;
@@ -1804,7 +1822,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 	spin_lock_irqsave(&queue->response_lock, flags);
 
-	make_tx_response(queue, &pending_tx_info->req, status);
+	make_tx_response(queue, &pending_tx_info->req,
+			 pending_tx_info->extra_count, status);
 
 	/* Release the pending index before pusing the Tx response so
 	 * its available before a new Tx request is pushed by the
@@ -1821,6 +1840,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 
 static void make_tx_response(struct xenvif_queue *queue,
 			     struct xen_netif_tx_request *txp,
+			     unsigned int extra_count,
 			     s8       st)
 {
 	RING_IDX i = queue->tx.rsp_prod_pvt;
@@ -1830,7 +1850,7 @@ static void make_tx_response(struct xenvif_queue *queue,
 	resp->id     = txp->id;
 	resp->status = st;
 
-	if (txp->flags & XEN_NETTXF_extra_info)
+	while (extra_count-- != 0)
 		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
 	queue->tx.rsp_prod_pvt = ++i;
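The netback.c changes above all serve one rule: a transmit request that consumed N extra-info slots on the shared ring must be completed with one real response followed by N XEN_NETIF_RSP_NULL padding slots, so the response producer advances in step with the request consumer. The old code keyed the padding off the XEN_NETTXF_extra_info flag and could therefore pad for at most one extra; the new extra_count, threaded from xenvif_get_extras() through pending_tx_info to make_tx_response(), pads for however many extras were actually consumed. A minimal sketch of that slot accounting, in plain C with simplified stand-in types (the ring, response struct, and status values here are illustrative, not the kernel's):

/*
 * Sketch of the response-slot accounting changed by this patch.
 * Simplified stand-ins: a fixed 16-slot array models the shared ring,
 * RSP_OKAY/RSP_NULL model XEN_NETIF_RSP_OKAY/XEN_NETIF_RSP_NULL.
 */
#include <stdio.h>

#define RSP_OKAY 0
#define RSP_NULL 1

struct tx_response { unsigned id; int status; };

static struct tx_response ring[16];  /* stand-in response ring       */
static unsigned rsp_prod;            /* ring producer index          */

static void make_tx_response(unsigned id, int status, unsigned extra_count)
{
	unsigned i = rsp_prod;

	ring[i % 16].id = id;            /* one real response...        */
	ring[i % 16].status = status;

	while (extra_count-- != 0)       /* ...plus one null slot per   */
		ring[++i % 16].status = RSP_NULL; /* consumed extra     */

	rsp_prod = i + 1;
}

int main(void)
{
	make_tx_response(7, RSP_OKAY, 2); /* request carried two extras */
	printf("produced %u response slots\n", rsp_prod);
	return 0;
}

Compiled and run, this prints "produced 3 response slots" for a request that consumed three ring slots (one request, two extras); the old flag-based test would have produced only two, letting producer and consumer drift apart.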
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 56ebd8267..bd182cd55 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -327,7 +327,7 @@ static int netback_probe(struct xenbus_device *dev,
 			goto abort_transaction;
 		}
 
-		/* We support multicast-control. */
+		/* We support dynamic multicast-control. */
 		err = xenbus_printf(xbt, dev->nodename,
 				    "feature-multicast-control", "%d", 1);
 		if (err) {
@@ -335,6 +335,14 @@ static int netback_probe(struct xenbus_device *dev,
 			goto abort_transaction;
 		}
 
+		err = xenbus_printf(xbt, dev->nodename,
+				    "feature-dynamic-multicast-control",
+				    "%d", 1);
+		if (err) {
+			message = "writing feature-dynamic-multicast-control";
+			goto abort_transaction;
+		}
+
 		err = xenbus_transaction_end(xbt, 0);
 	} while (err == -EAGAIN);
 
@@ -503,8 +511,6 @@ static void set_backend_state(struct backend_info *be,
 		switch (state) {
 		case XenbusStateInitWait:
 		case XenbusStateConnected:
-			pr_info("%s: prepare for reconnect\n",
-				be->dev->nodename);
 			backend_switch_state(be, XenbusStateInitWait);
 			break;
 		case XenbusStateClosing:
@@ -683,7 +689,8 @@ static void xen_net_rate_changed(struct xenbus_watch *watch,
 	}
 }
 
-static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
+static int xen_register_credit_watch(struct xenbus_device *dev,
+				     struct xenvif *vif)
 {
 	int err = 0;
 	char *node;
@@ -708,7 +715,7 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
 	return err;
 }
 
-static void xen_unregister_watchers(struct xenvif *vif)
+static void xen_unregister_credit_watch(struct xenvif *vif)
 {
 	if (vif->credit_watch.node) {
 		unregister_xenbus_watch(&vif->credit_watch);
@@ -717,6 +724,75 @@ static void xen_unregister_watchers(struct xenvif *vif)
 	}
 }
 
+static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
+				   const char **vec, unsigned int len)
+{
+	struct xenvif *vif = container_of(watch, struct xenvif,
+					  mcast_ctrl_watch);
+	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
+	int val;
+
+	if (xenbus_scanf(XBT_NIL, dev->otherend,
+			 "request-multicast-control", "%d", &val) < 0)
+		val = 0;
+	vif->multicast_control = !!val;
+}
+
+static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
+					 struct xenvif *vif)
+{
+	int err = 0;
+	char *node;
+	unsigned maxlen = strlen(dev->otherend) +
+		sizeof("/request-multicast-control");
+
+	if (vif->mcast_ctrl_watch.node) {
+		pr_err_ratelimited("Watch is already registered\n");
+		return -EADDRINUSE;
+	}
+
+	node = kmalloc(maxlen, GFP_KERNEL);
+	if (!node) {
+		pr_err("Failed to allocate memory for watch\n");
+		return -ENOMEM;
+	}
+	snprintf(node, maxlen, "%s/request-multicast-control",
+		 dev->otherend);
+	vif->mcast_ctrl_watch.node = node;
+	vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
+	err = register_xenbus_watch(&vif->mcast_ctrl_watch);
+	if (err) {
+		pr_err("Failed to set watcher %s\n",
+		       vif->mcast_ctrl_watch.node);
+		kfree(node);
+		vif->mcast_ctrl_watch.node = NULL;
+		vif->mcast_ctrl_watch.callback = NULL;
+	}
+	return err;
+}
+
+static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
+{
+	if (vif->mcast_ctrl_watch.node) {
+		unregister_xenbus_watch(&vif->mcast_ctrl_watch);
+		kfree(vif->mcast_ctrl_watch.node);
+		vif->mcast_ctrl_watch.node = NULL;
+	}
+}
+
+static void xen_register_watchers(struct xenbus_device *dev,
+				  struct xenvif *vif)
+{
+	xen_register_credit_watch(dev, vif);
+	xen_register_mcast_ctrl_watch(dev, vif);
+}
+
+static void xen_unregister_watchers(struct xenvif *vif)
+{
+	xen_unregister_mcast_ctrl_watch(vif);
+	xen_unregister_credit_watch(vif);
+}
+
 static void unregister_hotplug_status_watch(struct backend_info *be)
 {
 	if (be->have_hotplug_status_watch) {
@@ -1030,11 +1106,6 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 		val = 0;
 	vif->ipv6_csum = !!val;
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
-			 "%d", &val) < 0)
-		val = 0;
-	vif->multicast_control = !!val;
-
 	return 0;
 }
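The xenbus.c changes replace the one-shot read of request-multicast-control in read_xenbus_vif_flags() (removed in the final hunk) with a registered xenbus watch, and advertise feature-dynamic-multicast-control so frontends know the key may be rewritten after connect. Whenever the frontend changes the key, xen_mcast_ctrl_changed() re-reads it and updates vif->multicast_control, with a failed read treated as 0. A minimal user-space model of that watch-and-reread pattern (struct watch and xenstore_read_int() below are simplified stand-ins for the xenbus watch API, not real kernel interfaces):

/*
 * Sketch of the watch pattern introduced here: register a callback on a
 * store node and re-read the node on every change event, so the flag can
 * be toggled at runtime rather than only at connect time.
 */
#include <stdbool.h>
#include <stdio.h>

struct watch {
	const char *node;
	void (*callback)(struct watch *w);
};

static bool multicast_control;  /* models vif->multicast_control    */
static int xenstore_value;      /* models the watched xenstore node */

/* Stand-in for xenbus_scanf(): read the watched node as an int. */
static int xenstore_read_int(const char *node, int *val)
{
	(void)node;
	*val = xenstore_value;
	return 0;
}

/* Models xen_mcast_ctrl_changed(): re-read the node on each event. */
static void mcast_ctrl_changed(struct watch *w)
{
	int val;

	if (xenstore_read_int(w->node, &val) < 0)
		val = 0;  /* a failed read means the feature is off */
	multicast_control = !!val;
}

int main(void)
{
	struct watch w = { "request-multicast-control", mcast_ctrl_changed };

	xenstore_value = 1;  /* frontend writes 1 at runtime...          */
	w.callback(&w);      /* ...the watch fires and the flag follows  */
	printf("multicast control: %d\n", multicast_control);

	xenstore_value = 0;
	w.callback(&w);
	printf("multicast control: %d\n", multicast_control);
	return 0;
}

Note also the teardown order in the new xen_unregister_watchers(): the multicast-control watch is removed before the credit watch, the reverse of the registration order.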