Commit | Line | Data |
---|---|---|
b93931a6 PO |
1 | /* |
2 | * linux/drivers/mmc/core/host.c | |
3 | * | |
4 | * Copyright (C) 2003 Russell King, All Rights Reserved. | |
ff3112f5 | 5 | * Copyright (C) 2007-2008 Pierre Ossman |
04566831 | 6 | * Copyright (C) 2010 Linus Walleij |
b93931a6 PO |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | * | |
12 | * MMC host class device management | |
13 | */ | |
14 | ||
15 | #include <linux/device.h> | |
16 | #include <linux/err.h> | |
17 | #include <linux/idr.h> | |
18 | #include <linux/pagemap.h> | |
3ef77af1 | 19 | #include <linux/export.h> |
af8350c7 | 20 | #include <linux/leds.h> |
5a0e3ad6 | 21 | #include <linux/slab.h> |
4c2ef25f | 22 | #include <linux/suspend.h> |
b93931a6 PO |
23 | |
24 | #include <linux/mmc/host.h> | |
04566831 | 25 | #include <linux/mmc/card.h> |
b93931a6 PO |
26 | |
27 | #include "core.h" | |
28 | #include "host.h" | |
29 | ||
30 | #define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev) | |
31 | ||
/*
 * Release callback for the mmc_host class device. The driver core calls
 * this when the last reference to class_dev is dropped (see the
 * put_device() in mmc_free_host()), so this is where the embedded
 * struct mmc_host is finally freed.
 */
static void mmc_host_classdev_release(struct device *dev)
{
	struct mmc_host *host = cls_dev_to_mmc_host(dev);
	/* No further slot.lock users can exist once we get here. */
	mutex_destroy(&host->slot.lock);
	kfree(host);
}
38 | ||
/*
 * The "mmc_host" device class. Hosts registered against it are torn
 * down through mmc_host_classdev_release() when their last reference
 * goes away.
 */
static struct class mmc_host_class = {
	.name = "mmc_host",
	.dev_release = mmc_host_classdev_release,
};
43 | ||
/*
 * Register the "mmc_host" class with the driver core.
 * Returns the result of class_register() (0 on success).
 */
int mmc_register_host_class(void)
{
	return class_register(&mmc_host_class);
}
48 | ||
/* Unregister the "mmc_host" class; counterpart of mmc_register_host_class(). */
void mmc_unregister_host_class(void)
{
	class_unregister(&mmc_host_class);
}
53 | ||
54 | static DEFINE_IDR(mmc_host_idr); | |
55 | static DEFINE_SPINLOCK(mmc_host_lock); | |
56 | ||
04566831 | 57 | #ifdef CONFIG_MMC_CLKGATE |
597dd9d7 SRT |
58 | static ssize_t clkgate_delay_show(struct device *dev, |
59 | struct device_attribute *attr, char *buf) | |
60 | { | |
61 | struct mmc_host *host = cls_dev_to_mmc_host(dev); | |
4137e504 | 62 | return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay); |
597dd9d7 SRT |
63 | } |
64 | ||
65 | static ssize_t clkgate_delay_store(struct device *dev, | |
66 | struct device_attribute *attr, const char *buf, size_t count) | |
67 | { | |
68 | struct mmc_host *host = cls_dev_to_mmc_host(dev); | |
69 | unsigned long flags, value; | |
70 | ||
71 | if (kstrtoul(buf, 0, &value)) | |
72 | return -EINVAL; | |
73 | ||
74 | spin_lock_irqsave(&host->clk_lock, flags); | |
75 | host->clkgate_delay = value; | |
76 | spin_unlock_irqrestore(&host->clk_lock, flags); | |
597dd9d7 SRT |
77 | return count; |
78 | } | |
04566831 LW |
79 | |
/*
 * Enabling clock gating will make the core call out to the host
 * once up and once down when it performs a request or card operation
 * intermingled in any fashion. The driver will see this through
 * set_ios() operations with ios.clock field set to 0 to gate (disable)
 * the block clock, and to the old frequency to enable it again.
 */
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
	unsigned long tick_ns;
	unsigned long freq = host->ios.clock;
	unsigned long flags;

	if (!freq) {
		pr_debug("%s: frequency set to 0 in disable function, "
			"this means the clock is already disabled.\n",
			mmc_hostname(host));
		return;
	}
	/*
	 * New requests may have appeared while we were scheduling,
	 * then there is no reason to delay the check before
	 * clk_disable().
	 */
	spin_lock_irqsave(&host->clk_lock, flags);

	/*
	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
	 * to disable the MCI block clock. The reference count may have
	 * gone up again after this delay due to rescheduling!
	 */
	if (!host->clk_requests) {
		/* ndelay() busy-waits, so drop the spinlock first. */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		tick_ns = DIV_ROUND_UP(1000000000, freq);
		ndelay(host->clk_delay * tick_ns);
	} else {
		/* New users appeared while waiting for this work */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		return;
	}
	/* clk_gate_mutex serializes the actual gate/ungate transitions. */
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	/* Re-check under the lock: a user may have arrived during ndelay(). */
	if (!host->clk_requests) {
		spin_unlock_irqrestore(&host->clk_lock, flags);
		/* This will set host->ios.clock to 0 */
		mmc_gate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}
132 | ||
/*
 * Internal work. Work to disable the clock at some later point.
 * Scheduled from mmc_host_clk_release() after clkgate_delay ms.
 */
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
					     clk_gate_work.work);

	mmc_host_clk_gate_delayed(host);
}
143 | ||
/**
 * mmc_host_clk_hold - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call. Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_hold(struct mmc_host *host)
{
	unsigned long flags;

	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
	cancel_delayed_work_sync(&host->clk_gate_work);
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
		/*
		 * Drop the spinlock around the call into the driver;
		 * clk_gate_mutex (held across this whole section) keeps
		 * us serialized against the gating work meanwhile.
		 */
		spin_unlock_irqrestore(&host->clk_lock, flags);
		mmc_ungate_clock(host);
		spin_lock_irqsave(&host->clk_lock, flags);
		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
	}
	/* Count ourselves as a user so the gate work stays away. */
	host->clk_requests++;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mutex_unlock(&host->clk_gate_mutex);
}
170 | ||
171 | /** | |
172 | * mmc_host_may_gate_card - check if this card may be gated | |
173 | * @card: card to check. | |
174 | */ | |
175 | static bool mmc_host_may_gate_card(struct mmc_card *card) | |
176 | { | |
177 | /* If there is no card we may gate it */ | |
178 | if (!card) | |
179 | return true; | |
180 | /* | |
181 | * Don't gate SDIO cards! These need to be clocked at all times | |
182 | * since they may be independent systems generating interrupts | |
183 | * and other events. The clock requests counter from the core will | |
184 | * go down to zero since the core does not need it, but we will not | |
185 | * gate the clock, because there is somebody out there that may still | |
186 | * be using it. | |
187 | */ | |
db993500 | 188 | return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING); |
04566831 LW |
189 | } |
190 | ||
/**
 * mmc_host_clk_release - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_release(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_requests--;
	/*
	 * Last user gone and the card type permits gating: schedule the
	 * actual gating on system_nrt_wq after clkgate_delay ms, giving
	 * a new user a chance to cancel it via mmc_host_clk_hold().
	 */
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
		queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
				   msecs_to_jiffies(host->clkgate_delay));
	spin_unlock_irqrestore(&host->clk_lock, flags);
}
211 | ||
212 | /** | |
213 | * mmc_host_clk_rate - get current clock frequency setting | |
214 | * @host: host to get the clock frequency for. | |
215 | * | |
216 | * Returns current clock frequency regardless of gating. | |
217 | */ | |
218 | unsigned int mmc_host_clk_rate(struct mmc_host *host) | |
219 | { | |
220 | unsigned long freq; | |
221 | unsigned long flags; | |
222 | ||
223 | spin_lock_irqsave(&host->clk_lock, flags); | |
224 | if (host->clk_gated) | |
225 | freq = host->clk_old; | |
226 | else | |
227 | freq = host->ios.clock; | |
228 | spin_unlock_irqrestore(&host->clk_lock, flags); | |
229 | return freq; | |
230 | } | |
231 | ||
232 | /** | |
233 | * mmc_host_clk_init - set up clock gating code | |
234 | * @host: host with potential clock to control | |
235 | */ | |
236 | static inline void mmc_host_clk_init(struct mmc_host *host) | |
237 | { | |
238 | host->clk_requests = 0; | |
239 | /* Hold MCI clock for 8 cycles by default */ | |
240 | host->clk_delay = 8; | |
597dd9d7 | 241 | /* |
c84f15ae | 242 | * Default clock gating delay is 0ms to avoid wasting power. |
597dd9d7 SRT |
243 | * This value can be tuned by writing into sysfs entry. |
244 | */ | |
c84f15ae | 245 | host->clkgate_delay = 0; |
04566831 | 246 | host->clk_gated = false; |
597dd9d7 | 247 | INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); |
04566831 | 248 | spin_lock_init(&host->clk_lock); |
86f315bb | 249 | mutex_init(&host->clk_gate_mutex); |
04566831 LW |
250 | } |
251 | ||
/**
 * mmc_host_clk_exit - shut down clock gating code
 * @host: host with potential clock to control
 */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
	/*
	 * Wait for any outstanding gate and then make sure we're
	 * ungated before exiting.
	 */
	if (cancel_delayed_work_sync(&host->clk_gate_work))
		/* Work was pending: run the gate step synchronously now. */
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		/* Leaves clk_requests incremented, which is intended here. */
		mmc_host_clk_hold(host);
	/* There should be only one user now */
	WARN_ON(host->clk_requests > 1);
}
269 | ||
597dd9d7 SRT |
270 | static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) |
271 | { | |
272 | host->clkgate_delay_attr.show = clkgate_delay_show; | |
273 | host->clkgate_delay_attr.store = clkgate_delay_store; | |
274 | sysfs_attr_init(&host->clkgate_delay_attr.attr); | |
275 | host->clkgate_delay_attr.attr.name = "clkgate_delay"; | |
276 | host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR; | |
277 | if (device_create_file(&host->class_dev, &host->clkgate_delay_attr)) | |
278 | pr_err("%s: Failed to create clkgate_delay sysfs entry\n", | |
279 | mmc_hostname(host)); | |
280 | } | |
04566831 LW |
281 | #else |
282 | ||
/* CONFIG_MMC_CLKGATE unset: clock gating setup is a no-op. */
static inline void mmc_host_clk_init(struct mmc_host *host)
{
}
286 | ||
/* CONFIG_MMC_CLKGATE unset: clock gating teardown is a no-op. */
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}
290 | ||
597dd9d7 SRT |
/* CONFIG_MMC_CLKGATE unset: no clkgate_delay sysfs attribute to create. */
static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
{
}
294 | ||
04566831 LW |
295 | #endif |
296 | ||
b93931a6 PO |
/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
 * @dev: pointer to host device model structure
 *
 * Initialise the per-host structure.
 *
 * Returns the new host on success, or NULL on allocation or idr failure.
 * The host is not yet visible to the system; mmc_add_host() does that.
 */
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
	int err;
	struct mmc_host *host;

	/* Pre-allocate idr memory so idr_get_new() can run under a spinlock. */
	if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
		return NULL;

	/* Private driver data lives directly after the mmc_host struct. */
	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
	if (!host)
		return NULL;

	spin_lock(&mmc_host_lock);
	err = idr_get_new(&mmc_host_idr, host, &host->index);
	spin_unlock(&mmc_host_lock);
	if (err)
		goto free;

	/* The index becomes the "mmc%d" name seen in sysfs and dmesg. */
	dev_set_name(&host->class_dev, "mmc%d", host->index);

	host->parent = dev;
	host->class_dev.parent = dev;
	host->class_dev.class = &mmc_host_class;
	device_initialize(&host->class_dev);

	mmc_host_clk_init(host);

	mutex_init(&host->slot.lock);
	/* No card-detect IRQ wired up yet. */
	host->slot.cd_irq = -EINVAL;

	spin_lock_init(&host->lock);
	init_waitqueue_head(&host->wq);
	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
#ifdef CONFIG_PM
	host->pm_notify.notifier_call = mmc_pm_notify;
#endif

	/*
	 * By default, hosts do not support SGIO or large requests.
	 * They have to set these according to their abilities.
	 */
	host->max_segs = 1;
	host->max_seg_size = PAGE_CACHE_SIZE;

	host->max_req_size = PAGE_CACHE_SIZE;
	host->max_blk_size = 512;
	host->max_blk_count = PAGE_CACHE_SIZE / 512;

	return host;

free:
	kfree(host);
	return NULL;
}

EXPORT_SYMBOL(mmc_alloc_host);
360 | ||
/**
 * mmc_add_host - initialise host hardware
 * @host: mmc host
 *
 * Register the host with the driver model. The host must be
 * prepared to start servicing requests before this function
 * completes.
 *
 * Returns 0 on success or the error from device_add().
 */
int mmc_add_host(struct mmc_host *host)
{
	int err;

	/* A host claiming SDIO IRQ capability must supply the callback. */
	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
		!host->ops->enable_sdio_irq);

	err = device_add(&host->class_dev);
	if (err)
		return err;

	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);

#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif
	mmc_host_clk_sysfs_init(host);

	/* Kick off card detection and hook into suspend/resume. */
	mmc_start_host(host);
	register_pm_notifier(&host->pm_notify);

	return 0;
}

EXPORT_SYMBOL(mmc_add_host);
394 | ||
/**
 * mmc_remove_host - remove host hardware
 * @host: mmc host
 *
 * Unregister and remove all cards associated with this host,
 * and power down the MMC bus. No new requests will be issued
 * after this function has returned.
 */
void mmc_remove_host(struct mmc_host *host)
{
	/* Tear down in reverse order of mmc_add_host(). */
	unregister_pm_notifier(&host->pm_notify);
	mmc_stop_host(host);

#ifdef CONFIG_DEBUG_FS
	mmc_remove_host_debugfs(host);
#endif

	device_del(&host->class_dev);

	led_trigger_unregister_simple(host->led);

	/* Make sure the clock ends up ungated before the host goes away. */
	mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);
420 | ||
/**
 * mmc_free_host - free the host structure
 * @host: mmc host
 *
 * Free the host once all references to it have been dropped.
 */
void mmc_free_host(struct mmc_host *host)
{
	/* Return the "mmc%d" index to the pool for reuse. */
	spin_lock(&mmc_host_lock);
	idr_remove(&mmc_host_idr, host->index);
	spin_unlock(&mmc_host_lock);

	/* The actual kfree() happens in mmc_host_classdev_release(). */
	put_device(&host->class_dev);
}

EXPORT_SYMBOL(mmc_free_host);