Drivers: hv: vss: switch to using the hvutil_device_state state machine
drivers/hv/hv_snapshot.c
/*
 * An implementation of host initiated guest snapshot.
 *
 *
 * Copyright (C) 2013, Microsoft, Inc.
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/nls.h>
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

#define VSS_MAJOR	5
#define VSS_MINOR	0
#define VSS_VERSION	(VSS_MAJOR << 16 | VSS_MINOR)

#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))

/*
 * Global state maintained for the transaction that is being processed. For a
 * class of integration services, including the "VSS service", the specified
 * protocol is a "request/response" protocol, which means that there can be
 * only a single outstanding transaction from the host at any given point in
 * time. We use this to simplify memory management in this driver - we cache
 * and process only one message at a time.
 *
 * While the request/response protocol is guaranteed by the host, we further
 * ensure this by serializing packet processing in this driver - we do not
 * read additional packets from the VMBus until the current packet is fully
 * handled.
 */

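/*
 * hvutil_device_state transitions driven by this driver:
 *
 *   HVUTIL_DEVICE_INIT      -> HVUTIL_READY             daemon registers
 *   HVUTIL_READY            -> HVUTIL_HOSTMSG_RECEIVED  host asks freeze/thaw
 *   HVUTIL_HOSTMSG_RECEIVED -> HVUTIL_USERSPACE_REQ     request sent to daemon
 *   HVUTIL_USERSPACE_REQ    -> HVUTIL_USERSPACE_RECV    daemon replied
 *   HVUTIL_USERSPACE_RECV   -> HVUTIL_READY             host answered
 *   active states           -> HVUTIL_READY             on timeout/send error
 *   any state               -> HVUTIL_DEVICE_DYING      in hv_vss_deinit()
 */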
static struct {
	int state;   /* hvutil_device_state */
	int recv_len; /* number of bytes received. */
	struct vmbus_channel *recv_channel; /* channel we got the request on */
	u64 recv_req_id; /* request ID. */
	struct hv_vss_msg *msg; /* current message */
	void *vss_context; /* for the channel callback */
} vss_transaction;

static void vss_respond_to_host(int error);

static struct cb_id vss_id = { CN_VSS_IDX, CN_VSS_VAL };
static const char vss_name[] = "vss_kernel_module";
static __u8 *recv_buffer;

static void vss_send_op(struct work_struct *dummy);
static void vss_timeout_func(struct work_struct *dummy);

static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
static DECLARE_WORK(vss_send_op_work, vss_send_op);

/*
 * Timeout handler for the reply from the userspace daemon
 * (see VSS_USERSPACE_TIMEOUT).
 */

static void vss_timeout_func(struct work_struct *dummy)
{
	/*
	 * We timed out waiting for the userspace component to reply.
	 */
	pr_warn("VSS: timeout waiting for daemon to reply\n");
	vss_respond_to_host(HV_E_FAIL);

	/* Transaction is finished, reset the state. */
	if (vss_transaction.state > HVUTIL_READY)
		vss_transaction.state = HVUTIL_READY;

	hv_poll_channel(vss_transaction.vss_context,
			hv_vss_onchannelcallback);
}

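/*
 * Callback invoked when data is received from user mode (the VSS daemon,
 * over the netlink connector): handles daemon registration and replies to
 * outstanding freeze/thaw requests.
 */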
static void
vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	struct hv_vss_msg *vss_msg;

	vss_msg = (struct hv_vss_msg *)msg->data;

	/*
	 * Don't process registration messages while we're in the middle of
	 * processing a transaction.
	 */
	if (vss_transaction.state > HVUTIL_READY &&
	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER)
		return;

	if (vss_transaction.state == HVUTIL_DEVICE_INIT &&
	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER) {
		pr_info("VSS daemon registered\n");
		vss_transaction.state = HVUTIL_READY;
	} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
		vss_transaction.state = HVUTIL_USERSPACE_RECV;
		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(vss_msg->error);
			/* Transaction is finished, reset the state. */
			vss_transaction.state = HVUTIL_READY;
			hv_poll_channel(vss_transaction.vss_context,
					hv_vss_onchannelcallback);
		}
	} else {
		/* This is a spurious call! */
		pr_warn("VSS: Transaction not active\n");
		return;
	}
}

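/*
 * Forward the current host request to the user space daemon through the
 * netlink connector; on failure, fail the transaction back to the host.
 */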
static void vss_send_op(struct work_struct *dummy)
{
	int op = vss_transaction.msg->vss_hdr.operation;
	int rc;
	struct cn_msg *msg;
	struct hv_vss_msg *vss_msg;

	/* The transaction state is wrong. */
	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
		return;

	msg = kzalloc(sizeof(*msg) + sizeof(*vss_msg), GFP_ATOMIC);
	if (!msg)
		return;

	vss_msg = (struct hv_vss_msg *)msg->data;

	msg->id.idx = CN_VSS_IDX;
	msg->id.val = CN_VSS_VAL;

	vss_msg->vss_hdr.operation = op;
	msg->len = sizeof(struct hv_vss_msg);

	vss_transaction.state = HVUTIL_USERSPACE_REQ;
	rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
	if (rc) {
		pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(HV_E_FAIL);
			vss_transaction.state = HVUTIL_READY;
		}
	}

	kfree(msg);

	return;
}

/*
 * Send a response back to the host.
 */

static void
vss_respond_to_host(int error)
{
	struct icmsg_hdr *icmsghdrp;
	u32 buf_len;
	struct vmbus_channel *channel;
	u64 req_id;

	/*
	 * Copy the global state for completing the transaction. Note that
	 * only one transaction can be active at a time.
	 */

	buf_len = vss_transaction.recv_len;
	channel = vss_transaction.recv_channel;
	req_id = vss_transaction.recv_req_id;

	icmsghdrp = (struct icmsg_hdr *)
			&recv_buffer[sizeof(struct vmbuspipe_hdr)];

	if (channel->onchannel_callback == NULL)
		/*
		 * We have raced with util driver being unloaded;
		 * silently return.
		 */
		return;

	icmsghdrp->status = error;

	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;

	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
				VM_PKT_DATA_INBAND, 0);

}

/*
 * This callback is invoked when we get a VSS message from the host.
 * The host ensures that only one VSS transaction can be active at a time.
 */

void hv_vss_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct hv_vss_msg *vss_msg;

	struct icmsg_hdr *icmsghdrp;
	struct icmsg_negotiate *negop = NULL;

	if (vss_transaction.state > HVUTIL_READY) {
		/*
		 * Defer processing this callback; it is re-triggered via
		 * hv_poll_channel() once the current transaction completes.
		 */
		vss_transaction.vss_context = context;
		return;
	}
	vss_transaction.vss_context = NULL;

	vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
			 &requestid);

	if (recvlen > 0) {
		icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
				sizeof(struct vmbuspipe_hdr)];

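		/* Version negotiation: the reply is built in recv_buffer. */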
		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			vmbus_prep_negotiate_resp(icmsghdrp, negop,
				 recv_buffer, UTIL_FW_VERSION,
				 VSS_VERSION);
		} else {
			vss_msg = (struct hv_vss_msg *)&recv_buffer[
				sizeof(struct vmbuspipe_hdr) +
				sizeof(struct icmsg_hdr)];

			/*
			 * Stash away this global state for completing the
			 * transaction; note transactions are serialized.
			 */

			vss_transaction.recv_len = recvlen;
			vss_transaction.recv_channel = channel;
			vss_transaction.recv_req_id = requestid;
			vss_transaction.msg = (struct hv_vss_msg *)vss_msg;

			switch (vss_msg->vss_hdr.operation) {
			/*
			 * Initiate a "freeze/thaw"
			 * operation in the guest.
			 * We respond to the host once
			 * the operation is complete.
			 *
			 * We send the message to the
			 * user space daemon and the
			 * operation is performed in
			 * the daemon.
			 */
			case VSS_OP_FREEZE:
			case VSS_OP_THAW:
				if (vss_transaction.state < HVUTIL_READY) {
					/* Userspace is not registered yet */
					vss_respond_to_host(HV_E_FAIL);
					return;
				}
				vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
				schedule_work(&vss_send_op_work);
				schedule_delayed_work(&vss_timeout_work,
						      VSS_USERSPACE_TIMEOUT);
				return;

			case VSS_OP_HOT_BACKUP:
				vss_msg->vss_cf.flags =
					VSS_HBU_NO_AUTO_RECOVERY;
				vss_respond_to_host(0);
				return;

			case VSS_OP_GET_DM_INFO:
				vss_msg->dm_info.flags = 0;
				vss_respond_to_host(0);
				return;

			default:
				vss_respond_to_host(0);
				return;

			}

		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, recv_buffer,
				 recvlen, requestid,
				 VM_PKT_DATA_INBAND, 0);
	}

}

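/*
 * Called when this driver loads: register the netlink connector callback
 * used to talk to the VSS daemon. The daemon itself may register later.
 */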
int
hv_vss_init(struct hv_util_service *srv)
{
	int err;

	err = cn_add_callback(&vss_id, vss_name, vss_cn_callback);
	if (err)
		return err;
	recv_buffer = srv->recv_buffer;

	/*
	 * When this driver loads, the user level daemon that
	 * processes the host requests may not yet be running.
	 * Defer processing channel callbacks until the daemon
	 * has registered.
	 */
	vss_transaction.state = HVUTIL_DEVICE_INIT;

	return 0;
}

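/*
 * Called on unload: mark the device as dying, unregister the connector
 * callback and cancel any outstanding work.
 */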
void hv_vss_deinit(void)
{
	vss_transaction.state = HVUTIL_DEVICE_DYING;
	cn_del_callback(&vss_id);
	cancel_delayed_work_sync(&vss_timeout_work);
	cancel_work_sync(&vss_send_op_work);
}