+/* Build a response to @req on the control ring and advance the
+ * private response producer index.
+ */
+static void make_ctrl_response(struct xenvif *vif,
+                               const struct xen_netif_ctrl_request *req,
+                               u32 status, u32 data)
+{
+        RING_IDX idx = vif->ctrl.rsp_prod_pvt;
+        struct xen_netif_ctrl_response rsp = {
+                .id = req->id,
+                .type = req->type,
+                .status = status,
+                .data = data,
+        };
+
+        *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
+        vif->ctrl.rsp_prod_pvt = ++idx;
+}
+
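+/* Push queued responses to the frontend, notifying it over the
+ * control event channel if the ring macro indicates a kick is needed.
+ */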
+static void push_ctrl_response(struct xenvif *vif)
+{
+        int notify;
+
+        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
+        if (notify)
+                notify_remote_via_irq(vif->ctrl_irq);
+}
+
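+/* Dispatch a single control request to the matching hash handler and
+ * respond with its status; request types that are not recognized are
+ * answered with XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED.
+ */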
+static void process_ctrl_request(struct xenvif *vif,
+                                 const struct xen_netif_ctrl_request *req)
+{
+        u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
+        u32 data = 0;
+
+        switch (req->type) {
+        case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
+                status = xenvif_set_hash_alg(vif, req->data[0]);
+                break;
+
+        case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
+                status = xenvif_get_hash_flags(vif, &data);
+                break;
+
+        case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
+                status = xenvif_set_hash_flags(vif, req->data[0]);
+                break;
+
+        case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
+                status = xenvif_set_hash_key(vif, req->data[0],
+                                             req->data[1]);
+                break;
+
+        case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
+                status = XEN_NETIF_CTRL_STATUS_SUCCESS;
+                data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
+                break;
+
+        case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
+                status = xenvif_set_hash_mapping_size(vif,
+                                                      req->data[0]);
+                break;
+
+        case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
+                status = xenvif_set_hash_mapping(vif, req->data[0],
+                                                 req->data[1],
+                                                 req->data[2]);
+                break;
+
+        default:
+                break;
+        }
+
+        make_ctrl_response(vif, req, status, data);
+        push_ctrl_response(vif);
+}
+
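+/* Drain the control ring: copy out and process every outstanding
+ * request, then re-check the producer index in case more requests
+ * arrived while we were working.
+ */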
+static void xenvif_ctrl_action(struct xenvif *vif)
+{
+        for (;;) {
+                RING_IDX req_prod, req_cons;
+
+                req_prod = vif->ctrl.sring->req_prod;
+                req_cons = vif->ctrl.req_cons;
+
+                /* Make sure we can see requests before we process them. */
+                rmb();
+
+                if (req_cons == req_prod)
+                        break;
+
+                while (req_cons != req_prod) {
+                        struct xen_netif_ctrl_request req;
+
+                        RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
+                        req_cons++;
+
+                        process_ctrl_request(vif, &req);
+                }
+
+                vif->ctrl.req_cons = req_cons;
+                vif->ctrl.sring->req_event = req_cons + 1;
+        }
+}
+
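+/* Check whether the control ring has unconsumed requests waiting. */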
+static bool xenvif_ctrl_work_todo(struct xenvif *vif)
+{
+        if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
+                return true;
+
+        return false;
+}
+
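+/* Kernel thread servicing the control ring. It sleeps on vif->ctrl_wq
+ * until there is work or the thread is asked to stop, and yields the
+ * CPU between passes over the ring.
+ */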
+int xenvif_ctrl_kthread(void *data)
+{
+        struct xenvif *vif = data;
+
+        for (;;) {
+                wait_event_interruptible(vif->ctrl_wq,
+                                         xenvif_ctrl_work_todo(vif) ||
+                                         kthread_should_stop());
+                if (kthread_should_stop())
+                        break;
+
+                while (xenvif_ctrl_work_todo(vif))
+                        xenvif_ctrl_action(vif);
+
+                cond_resched();
+        }
+
+        return 0;
+}
+