eCryptfs: Make all miscdev functions use daemon ptr in file private_data
fs/ecryptfs/messaging.c
/**
 * eCryptfs: Linux filesystem encryption layer
 *
 * Copyright (C) 2004-2008 International Business Machines Corp.
 *   Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com>
 *		Tyler Hicks <tyhicks@ou.edu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/nsproxy.h>
#include "ecryptfs_kernel.h"

static LIST_HEAD(ecryptfs_msg_ctx_free_list);
static LIST_HEAD(ecryptfs_msg_ctx_alloc_list);
static struct mutex ecryptfs_msg_ctx_lists_mux;

static struct hlist_head *ecryptfs_daemon_hash;
struct mutex ecryptfs_daemon_hash_mux;
static int ecryptfs_hash_bits;
#define ecryptfs_current_euid_hash() \
	hash_long((unsigned long)current_euid(), ecryptfs_hash_bits)

static u32 ecryptfs_msg_counter;
static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;

/**
 * ecryptfs_acquire_free_msg_ctx
 * @msg_ctx: The context that was acquired from the free list
 *
 * Acquires a context element from the free list and locks the mutex
 * on the context.  Sets the msg_ctx task to current.  Returns zero on
 * success; non-zero on error or upon failure to acquire a free
 * context element.  Must be called with ecryptfs_msg_ctx_lists_mux
 * held.
 */
static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx)
{
	struct list_head *p;
	int rc;

	if (list_empty(&ecryptfs_msg_ctx_free_list)) {
		printk(KERN_WARNING "%s: The eCryptfs free "
		       "context list is empty.  It may be helpful to "
		       "specify the ecryptfs_message_buf_len "
		       "parameter to be greater than the current "
		       "value of [%d]\n", __func__, ecryptfs_message_buf_len);
		rc = -ENOMEM;
		goto out;
	}
	list_for_each(p, &ecryptfs_msg_ctx_free_list) {
		*msg_ctx = list_entry(p, struct ecryptfs_msg_ctx, node);
		if (mutex_trylock(&(*msg_ctx)->mux)) {
			(*msg_ctx)->task = current;
			rc = 0;
			goto out;
		}
	}
	rc = -ENOMEM;
out:
	return rc;
}

/**
 * ecryptfs_msg_ctx_free_to_alloc
 * @msg_ctx: The context to move from the free list to the alloc list
 *
 * Must be called with ecryptfs_msg_ctx_lists_mux held.
 */
static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx)
{
	list_move(&msg_ctx->node, &ecryptfs_msg_ctx_alloc_list);
	msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_PENDING;
	msg_ctx->counter = ++ecryptfs_msg_counter;
}

/**
 * ecryptfs_msg_ctx_alloc_to_free
 * @msg_ctx: The context to move from the alloc list to the free list
 *
 * Must be called with ecryptfs_msg_ctx_lists_mux held.
 */
void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
{
	list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list);
	kfree(msg_ctx->msg);
	msg_ctx->msg = NULL;
	msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE;
}
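
/*
 * Editorial summary (derived from the helpers above and the functions
 * below; not in the original sources) of a message context's lifecycle:
 *
 *	FREE     -- sitting on ecryptfs_msg_ctx_free_list with msg == NULL
 *	PENDING  -- moved to ecryptfs_msg_ctx_alloc_list by
 *		    ecryptfs_msg_ctx_free_to_alloc(); the requester has
 *		    claimed the context, queued its request to the daemon,
 *		    and sleeps in ecryptfs_wait_for_response()
 *	DONE     -- ecryptfs_process_response() copied the reply into
 *		    msg_ctx->msg and woke the requester, which consumes the
 *		    reply and returns the context to FREE
 */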

/**
 * ecryptfs_find_daemon_by_euid
 * @daemon: If return value is zero, points to the desired daemon pointer
 *
 * Must be called with ecryptfs_daemon_hash_mux held.
 *
 * Search the hash list for the current effective user id.
 *
 * Returns zero if the user id exists in the list; non-zero otherwise.
 */
int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon)
{
	struct hlist_node *elem;
	int rc;

	hlist_for_each_entry(*daemon, elem,
			     &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()],
			     euid_chain) {
		if ((*daemon)->file->f_cred->euid == current_euid() &&
		    (*daemon)->file->f_cred->user_ns == current_user_ns()) {
			rc = 0;
			goto out;
		}
	}
	rc = -EINVAL;
out:
	return rc;
}

/**
 * ecryptfs_spawn_daemon - Create and initialize a new daemon struct
 * @daemon: Pointer to set to newly allocated daemon struct
 * @file: File used when opening /dev/ecryptfs
 *
 * Must be called ceremoniously while in possession of
 * ecryptfs_daemon_hash_mux
 *
 * Returns zero on success; non-zero otherwise
 */
int
ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file)
{
	int rc = 0;

	(*daemon) = kzalloc(sizeof(**daemon), GFP_KERNEL);
	if (!(*daemon)) {
		rc = -ENOMEM;
		printk(KERN_ERR "%s: Failed to allocate [%zd] bytes of "
		       "GFP_KERNEL memory\n", __func__, sizeof(**daemon));
		goto out;
	}
	(*daemon)->file = file;
	mutex_init(&(*daemon)->mux);
	INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue);
	init_waitqueue_head(&(*daemon)->wait);
	(*daemon)->num_queued_msg_ctx = 0;
	hlist_add_head(&(*daemon)->euid_chain,
		       &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()]);
out:
	return rc;
}
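
/*
 * Illustrative only (not code from this file): ecryptfs_miscdev_open() is
 * the expected caller.  A rough sketch of that call site, reflecting this
 * commit's intent that the daemon pointer be stashed in file->private_data
 * for the other miscdev file operations to reuse:
 *
 *	mutex_lock(&ecryptfs_daemon_hash_mux);
 *	rc = ecryptfs_spawn_daemon(&daemon, file);
 *	if (!rc)
 *		file->private_data = daemon;
 *	mutex_unlock(&ecryptfs_daemon_hash_mux);
 */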

/**
 * ecryptfs_exorcise_daemon - Destroy the daemon struct
 * @daemon: The daemon struct to destroy
 *
 * Must be called ceremoniously while in possession of
 * ecryptfs_daemon_hash_mux.  The daemon's own mux is acquired here, so
 * the caller must not already hold it.
 */
int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon)
{
	struct ecryptfs_msg_ctx *msg_ctx, *msg_ctx_tmp;
	int rc = 0;

	mutex_lock(&daemon->mux);
	if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ)
	    || (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) {
		rc = -EBUSY;
		mutex_unlock(&daemon->mux);
		goto out;
	}
	list_for_each_entry_safe(msg_ctx, msg_ctx_tmp,
				 &daemon->msg_ctx_out_queue, daemon_out_list) {
		list_del(&msg_ctx->daemon_out_list);
		daemon->num_queued_msg_ctx--;
		printk(KERN_WARNING "%s: Warning: dropping message that is in "
		       "the out queue of a dying daemon\n", __func__);
		ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
	}
	hlist_del(&daemon->euid_chain);
	mutex_unlock(&daemon->mux);
	kzfree(daemon);
out:
	return rc;
}
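
/*
 * Editorial note: ecryptfs_miscdev_release() and ecryptfs_release_messaging()
 * below are the expected callers.  After this commit, the miscdev release
 * path passes the daemon pointer stored in file->private_data instead of
 * repeating the euid hash lookup.
 */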

/**
 * ecryptfs_process_response
 * @daemon: The daemon associated with this response
 * @msg: The ecryptfs message received; the caller should sanity check
 *       msg->data_len and free the memory
 * @seq: The sequence number of the message; must match the sequence
 *       number for the existing message context waiting for this
 *       response
 *
 * Processes a response message after sending an operation request to
 * userspace.  Some other process is awaiting this response.  Before
 * sending out its first communications, the other process allocated a
 * msg_ctx from the ecryptfs_msg_ctx_arr at a particular index.  The
 * response message contains this index so that we can copy over the
 * response message into the msg_ctx that the process holds a
 * reference to.  The other process is going to wake up, check to see
 * that msg_ctx->state == ECRYPTFS_MSG_CTX_STATE_DONE, and then
 * proceed to read off and process the response message.
 *
 * Returns zero upon delivery to the desired context element; non-zero
 * upon delivery failure or error.
 */
int ecryptfs_process_response(struct ecryptfs_daemon *daemon,
			      struct ecryptfs_message *msg, u32 seq)
{
	struct ecryptfs_msg_ctx *msg_ctx;
	size_t msg_size;
	int rc;

	if (msg->index >= ecryptfs_message_buf_len) {
		rc = -EINVAL;
		printk(KERN_ERR "%s: Attempt to reference "
		       "context buffer at index [%d]; maximum "
		       "allowable is [%d]\n", __func__, msg->index,
		       (ecryptfs_message_buf_len - 1));
		goto out;
	}
	msg_ctx = &ecryptfs_msg_ctx_arr[msg->index];
	mutex_lock(&msg_ctx->mux);
	if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) {
		rc = -EINVAL;
		printk(KERN_WARNING "%s: Desired context element is not "
		       "pending a response\n", __func__);
		goto unlock;
	} else if (msg_ctx->counter != seq) {
		rc = -EINVAL;
		printk(KERN_WARNING "%s: Invalid message sequence; "
		       "expected [%d]; received [%d]\n", __func__,
		       msg_ctx->counter, seq);
		goto unlock;
	}
	msg_size = (sizeof(*msg) + msg->data_len);
	msg_ctx->msg = kmalloc(msg_size, GFP_KERNEL);
	if (!msg_ctx->msg) {
		rc = -ENOMEM;
		printk(KERN_ERR "%s: Failed to allocate [%zd] bytes of "
		       "GFP_KERNEL memory\n", __func__, msg_size);
		goto unlock;
	}
	memcpy(msg_ctx->msg, msg, msg_size);
	msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_DONE;
	wake_up_process(msg_ctx->task);
	rc = 0;
unlock:
	mutex_unlock(&msg_ctx->mux);
out:
	return rc;
}
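
/*
 * Editorial note: ecryptfs_miscdev_write() is the expected caller of
 * ecryptfs_process_response().  After this commit it passes the daemon
 * pointer taken from file->private_data, the message written by the
 * daemon, and the sequence number parsed from that write, rather than
 * looking the daemon up again by euid.
 */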

/**
 * ecryptfs_send_message_locked
 * @data: The data to send
 * @data_len: The length of data
 * @msg_type: Type of message
 * @msg_ctx: The message context allocated for the send
 *
 * Must be called with ecryptfs_daemon_hash_mux held.
 *
 * Returns zero on success; non-zero otherwise
 */
static int
ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type,
			     struct ecryptfs_msg_ctx **msg_ctx)
{
	struct ecryptfs_daemon *daemon;
	int rc;

	rc = ecryptfs_find_daemon_by_euid(&daemon);
	if (rc || !daemon) {
		rc = -ENOTCONN;
		goto out;
	}
	mutex_lock(&ecryptfs_msg_ctx_lists_mux);
	rc = ecryptfs_acquire_free_msg_ctx(msg_ctx);
	if (rc) {
		mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
		printk(KERN_WARNING "%s: Could not claim a free "
		       "context element\n", __func__);
		goto out;
	}
	ecryptfs_msg_ctx_free_to_alloc(*msg_ctx);
	mutex_unlock(&(*msg_ctx)->mux);
	mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
	rc = ecryptfs_send_miscdev(data, data_len, *msg_ctx, msg_type, 0,
				   daemon);
	if (rc)
		printk(KERN_ERR "%s: Error attempting to send message to "
		       "userspace daemon; rc = [%d]\n", __func__, rc);
out:
	return rc;
}

/**
 * ecryptfs_send_message
 * @data: The data to send
 * @data_len: The length of data
 * @msg_ctx: The message context allocated for the send
 *
 * Grabs ecryptfs_daemon_hash_mux.
 *
 * Returns zero on success; non-zero otherwise
 */
int ecryptfs_send_message(char *data, int data_len,
			  struct ecryptfs_msg_ctx **msg_ctx)
{
	int rc;

	mutex_lock(&ecryptfs_daemon_hash_mux);
	rc = ecryptfs_send_message_locked(data, data_len, ECRYPTFS_MSG_REQUEST,
					  msg_ctx);
	mutex_unlock(&ecryptfs_daemon_hash_mux);
	return rc;
}

/**
 * ecryptfs_wait_for_response
 * @msg_ctx: The context that was assigned when sending a message
 * @msg: The incoming message from userspace; not set if rc != 0
 *
 * Sleeps until awakened by ecryptfs_process_response or until the
 * amount of time exceeds ecryptfs_message_wait_timeout.  If zero is
 * returned, msg will point to a valid message from userspace; a
 * non-zero value is returned upon failure to receive a message or if
 * an error occurs.  The caller must free @msg on success.
 */
int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx,
			       struct ecryptfs_message **msg)
{
	signed long timeout = ecryptfs_message_wait_timeout * HZ;
	int rc = 0;

sleep:
	timeout = schedule_timeout_interruptible(timeout);
	mutex_lock(&ecryptfs_msg_ctx_lists_mux);
	mutex_lock(&msg_ctx->mux);
	if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_DONE) {
		if (timeout) {
			mutex_unlock(&msg_ctx->mux);
			mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
			goto sleep;
		}
		rc = -ENOMSG;
	} else {
		*msg = msg_ctx->msg;
		msg_ctx->msg = NULL;
	}
	ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
	mutex_unlock(&msg_ctx->mux);
	mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
	return rc;
}
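
/*
 * Illustrative only (a hypothetical caller, not code from this file): the
 * intended request/response pattern around the two functions above.  The
 * payload/payload_len names are placeholders.
 *
 *	struct ecryptfs_msg_ctx *msg_ctx;
 *	struct ecryptfs_message *msg = NULL;
 *	int rc;
 *
 *	rc = ecryptfs_send_message(payload, payload_len, &msg_ctx);
 *	if (rc)
 *		return rc;
 *	rc = ecryptfs_wait_for_response(msg_ctx, &msg);
 *	if (rc)
 *		return rc;	// the context is already back on the free list
 *	// interpret msg->data (msg->data_len bytes), then:
 *	kfree(msg);		// the caller owns msg on success
 */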

int __init ecryptfs_init_messaging(void)
{
	int i;
	int rc = 0;

	if (ecryptfs_number_of_users > ECRYPTFS_MAX_NUM_USERS) {
		ecryptfs_number_of_users = ECRYPTFS_MAX_NUM_USERS;
		printk(KERN_WARNING "%s: Specified number of users is "
		       "too large, defaulting to [%d] users\n", __func__,
		       ecryptfs_number_of_users);
	}
	mutex_init(&ecryptfs_daemon_hash_mux);
	mutex_lock(&ecryptfs_daemon_hash_mux);
	ecryptfs_hash_bits = 1;
	while (ecryptfs_number_of_users >> ecryptfs_hash_bits)
		ecryptfs_hash_bits++;
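	/*
	 * Editorial note: the loop above picks the smallest ecryptfs_hash_bits
	 * such that (1 << ecryptfs_hash_bits) > ecryptfs_number_of_users; for
	 * example, ecryptfs_number_of_users == 4 gives ecryptfs_hash_bits == 3,
	 * i.e. 8 hash buckets.
	 */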
	ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head)
					* (1 << ecryptfs_hash_bits)),
				       GFP_KERNEL);
	if (!ecryptfs_daemon_hash) {
		rc = -ENOMEM;
		printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
		mutex_unlock(&ecryptfs_daemon_hash_mux);
		goto out;
	}
	for (i = 0; i < (1 << ecryptfs_hash_bits); i++)
		INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]);
	mutex_unlock(&ecryptfs_daemon_hash_mux);
	ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx)
					* ecryptfs_message_buf_len),
				       GFP_KERNEL);
	if (!ecryptfs_msg_ctx_arr) {
		rc = -ENOMEM;
		printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
		goto out;
	}
	mutex_init(&ecryptfs_msg_ctx_lists_mux);
	mutex_lock(&ecryptfs_msg_ctx_lists_mux);
	ecryptfs_msg_counter = 0;
	for (i = 0; i < ecryptfs_message_buf_len; i++) {
		INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].node);
		INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].daemon_out_list);
		mutex_init(&ecryptfs_msg_ctx_arr[i].mux);
		mutex_lock(&ecryptfs_msg_ctx_arr[i].mux);
		ecryptfs_msg_ctx_arr[i].index = i;
		ecryptfs_msg_ctx_arr[i].state = ECRYPTFS_MSG_CTX_STATE_FREE;
		ecryptfs_msg_ctx_arr[i].counter = 0;
		ecryptfs_msg_ctx_arr[i].task = NULL;
		ecryptfs_msg_ctx_arr[i].msg = NULL;
		list_add_tail(&ecryptfs_msg_ctx_arr[i].node,
			      &ecryptfs_msg_ctx_free_list);
		mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux);
	}
	mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
	rc = ecryptfs_init_ecryptfs_miscdev();
	if (rc)
		ecryptfs_release_messaging();
out:
	return rc;
}

void ecryptfs_release_messaging(void)
{
	if (ecryptfs_msg_ctx_arr) {
		int i;

		mutex_lock(&ecryptfs_msg_ctx_lists_mux);
		for (i = 0; i < ecryptfs_message_buf_len; i++) {
			mutex_lock(&ecryptfs_msg_ctx_arr[i].mux);
			kfree(ecryptfs_msg_ctx_arr[i].msg);
			mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux);
		}
		kfree(ecryptfs_msg_ctx_arr);
		mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
	}
	if (ecryptfs_daemon_hash) {
		struct hlist_node *elem;
		struct ecryptfs_daemon *daemon;
		int i;

		mutex_lock(&ecryptfs_daemon_hash_mux);
		for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
			int rc;

			hlist_for_each_entry(daemon, elem,
					     &ecryptfs_daemon_hash[i],
					     euid_chain) {
				rc = ecryptfs_exorcise_daemon(daemon);
				if (rc)
					printk(KERN_ERR "%s: Error whilst "
					       "attempting to destroy daemon; "
					       "rc = [%d]. Dazed and confused, "
					       "but trying to continue.\n",
					       __func__, rc);
			}
		}
		kfree(ecryptfs_daemon_hash);
		mutex_unlock(&ecryptfs_daemon_hash_mux);
	}
	ecryptfs_destroy_ecryptfs_miscdev();
}