Orangefs: code sanitation.
fs/orangefs/waitqueue.c
/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

/*
 * In-kernel waitqueue operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

static int wait_for_matching_downcall(struct orangefs_kernel_op_s *, long, bool);
static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *);

/*
 * Walk the list of operations present in the request queue and mark
 * each one as purged.
 * NOTE: This is called from device close after the client-core has
 * guaranteed that no new operations can appear on the list, since the
 * client-core is going to exit anyway.
 */
void purge_waiting_ops(void)
{
	struct orangefs_kernel_op_s *op;

	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry(op, &orangefs_request_list, list) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "pvfs2-client-core: purging op tag %llu %s\n",
			     llu(op->tag),
			     get_opname_string(op));
		set_op_state_purged(op);
	}
	spin_unlock(&orangefs_request_list_lock);
}

/*
 * Submits an ORANGEFS operation and waits for it to complete.
 *
 * Note: op->downcall.status will contain the status of the operation (in
 * errno format), whether provided by pvfs2-client or as a result of a
 * failure to service the operation. If the caller wishes to distinguish,
 * op->state can be checked to see whether it was serviced or not.
 *
 * Returns the contents of op->downcall.status for convenience.
 */
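
/*
 * Illustrative caller sketch (not part of this file): the op type, the
 * getattr request field and get_interruptible_flag() below are assumptions
 * drawn from other orangefs sources, and real callers may differ.
 *
 *	struct orangefs_kernel_op_s *new_op;
 *	int ret;
 *
 *	new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
 *	if (!new_op)
 *		return -ENOMEM;
 *	new_op->upcall.req.getattr.refn = orangefs_inode->refn;
 *	ret = service_operation(new_op, __func__,
 *				get_interruptible_flag(inode));
 *	if (ret == 0)
 *		... consume new_op->downcall.resp.getattr ...
 *	op_release(new_op);
 *	return ret;
 */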
int service_operation(struct orangefs_kernel_op_s *op,
		      const char *op_name,
		      int flags)
{
	long timeout = MAX_SCHEDULE_TIMEOUT;
	int ret = 0;

	DEFINE_WAIT(wait_entry);

	op->upcall.tgid = current->tgid;
	op->upcall.pid = current->pid;

retry_servicing:
	op->downcall.status = 0;
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: %s op:%p: process:%s: pid:%d:\n",
		     __func__,
		     op_name,
		     op,
		     current->comm,
		     current->pid);

	/*
	 * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid
	 * acquiring the request_mutex because we're servicing a
	 * high priority remount operation and the request_mutex is
	 * already taken.
	 */
	if (!(flags & ORANGEFS_OP_NO_MUTEX)) {
		if (flags & ORANGEFS_OP_INTERRUPTIBLE)
			ret = mutex_lock_interruptible(&request_mutex);
		else
			ret = mutex_lock_killable(&request_mutex);
		/*
		 * check to see if we were interrupted while waiting for
		 * mutex
		 */
		if (ret < 0) {
			op->downcall.status = ret;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s: service_operation interrupted.\n",
				     __func__);
			return ret;
		}
	}

	/* queue up the operation */
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	/* add high priority remount op to the front of the line. */
	if (flags & ORANGEFS_OP_PRIORITY)
		list_add(&op->list, &orangefs_request_list);
	else
		list_add_tail(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
	if (!__is_daemon_in_service()) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:client core is NOT in service.\n",
			     __func__);
		timeout = op_timeout_secs * HZ;
	}
	spin_unlock(&orangefs_request_list_lock);

	if (!(flags & ORANGEFS_OP_NO_MUTEX))
		mutex_unlock(&request_mutex);

	ret = wait_for_matching_downcall(op, timeout,
					 flags & ORANGEFS_OP_INTERRUPTIBLE);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: wait_for_matching_downcall returned %d for %p\n",
		     __func__,
		     ret,
		     op);

	/* got matching downcall; make sure status is in errno format */
	if (!ret) {
		spin_unlock(&op->lock);
		op->downcall.status =
		    orangefs_normalize_to_errno(op->downcall.status);
		ret = op->downcall.status;
		goto out;
	}

	/* failed to get matching downcall */
	if (ret == -ETIMEDOUT) {
		gossip_err("%s: %s -- wait timed out; aborting attempt.\n",
			   __func__,
			   op_name);
	}

	/*
	 * remove a waiting op from the request list or
	 * remove an in-progress op from the in-progress list.
	 */
	orangefs_clean_up_interrupted_operation(op);

	op->downcall.status = ret;
	/* retry if operation has not been serviced and if requested */
	if (ret == -EAGAIN) {
		op->attempts++;
		timeout = op_timeout_secs * HZ;
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "orangefs: tag %llu (%s) -- operation to be retried (attempt %d)\n",
			     llu(op->tag),
			     op_name,
			     op->attempts);

		/*
		 * io ops (ops that use the shared memory buffer) have
		 * to be returned to their caller for a retry. Other ops
		 * can just be recycled here.
		 */
		if (!op->uses_shared_memory)
			goto retry_servicing;
	}

out:
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "orangefs: service_operation %s returning: %d for %p.\n",
		     op_name,
		     ret,
		     op);
	return ret;
}

/* This can get called on an I/O op if it had a bad service_operation. */
bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op)
{
	u64 tag = op->tag;

	if (!op_state_in_progress(op))
		return false;

	op->slot_to_free = op->upcall.req.io.buf_index;
	memset(&op->upcall, 0, sizeof(op->upcall));
	memset(&op->downcall, 0, sizeof(op->downcall));
	op->upcall.type = ORANGEFS_VFS_OP_CANCEL;
	op->upcall.req.cancel.op_tag = tag;
	op->downcall.type = ORANGEFS_VFS_OP_INVALID;
	op->downcall.status = -1;
	orangefs_new_tag(op);

	spin_lock(&orangefs_request_list_lock);
	/* orangefs_request_list_lock is enough of a barrier here */
	if (!__is_daemon_in_service()) {
		spin_unlock(&orangefs_request_list_lock);
		return false;
	}
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	spin_unlock(&orangefs_request_list_lock);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "Attempting ORANGEFS operation cancellation of tag %llu\n",
		     llu(tag));
	return true;
}
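
/*
 * Hedged usage sketch (the real call site lives in the orangefs I/O path
 * and may differ; "new_op", "ret" and "flags" below are placeholders):
 * after service_operation() fails for an op that holds a shared-memory
 * slot, a caller can attempt this best-effort cancel upcall before
 * giving up:
 *
 *	ret = service_operation(new_op, __func__, flags);
 *	if (ret < 0 && op_state_in_progress(new_op))
 *		orangefs_cancel_op_in_progress(new_op);
 */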

/*
 * Change an op to the "given up" state and remove it from its list.
 */
static void
orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
{
	/*
	 * handle interrupted cases depending on what state we were in when
	 * the interruption is detected.
	 *
	 * Called with op->lock held.
	 */

	/*
	 * List manipulation code elsewhere will ignore ops that
	 * have been given up upon.
	 */
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (list_empty(&op->list)) {
		/* caught copying to/from daemon */
		BUG_ON(op_state_serviced(op));
		spin_unlock(&op->lock);
		wait_for_completion(&op->waitq);
	} else if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del_init(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del_init(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p"
			     " from htable_ops_in_progress\n",
			     op);
	} else {
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	}
	reinit_completion(&op->waitq);
}

/*
 * Sleeps on the waitqueue waiting for the matching downcall.
 * If client-core finishes servicing the op, then we are good to go.
 * Otherwise, if client-core exits, we get woken up here and retry with
 * a timeout.
 *
 * When this call returns to the caller, the specified op will no
 * longer be in either the in_progress hash table or on the request list.
 *
 * Returns 0 on success and -errno on failure.
 * Errors are:
 * EAGAIN in case we want the caller to requeue and try again.
 * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
 * operation since client-core seems to be exiting too often
 * or if we were interrupted.
 *
 * Returns with op->lock taken.
 */
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
				      long timeout,
				      bool interruptible)
{
	long n;

	/*
	 * There's a "schedule_timeout" inside of these wait
	 * primitives, during which the op is out of the hands of the
	 * user process that needs something done and is being
	 * manipulated by the client-core process.
	 */
	if (interruptible)
		n = wait_for_completion_interruptible_timeout(&op->waitq,
							      timeout);
	else
		n = wait_for_completion_killable_timeout(&op->waitq, timeout);

	spin_lock(&op->lock);

	if (op_state_serviced(op))
		return 0;

	if (unlikely(n < 0)) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "*** %s:"
			     " operation interrupted by a signal (tag "
			     "%llu, op %p)\n",
			     __func__,
			     llu(op->tag),
			     op);
		return -EINTR;
	}
	if (op_state_purged(op)) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "*** %s:"
			     " operation purged (tag "
			     "%llu, %p, att %d)\n",
			     __func__,
			     llu(op->tag),
			     op,
			     op->attempts);
		return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
			 -EAGAIN :
			 -EIO;
	}
	/* must have timed out, then... */
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "*** %s:"
		     " operation timed out (tag"
		     " %llu, %p, att %d)\n",
		     __func__,
		     llu(op->tag),
		     op,
		     op->attempts);
	return -ETIMEDOUT;
}