Commit | Line | Data |
---|---|---|
7c236c43 | 1 | /**************************************************************************** |
f7a6d2c4 BH |
2 | * Driver for Solarflare network controllers and boards |
3 | * Copyright 2011-2013 Solarflare Communications Inc. | |
7c236c43 SH |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License version 2 as published | |
7 | * by the Free Software Foundation, incorporated herein by reference. | |
8 | */ | |
9 | ||
10 | /* Theory of operation: | |
11 | * | |
12 | * PTP support is assisted by firmware running on the MC, which provides | |
13 | * the hardware timestamping capabilities. Both transmitted and received | |
14 | * PTP event packets are queued onto internal queues for subsequent processing; | |
15 | * this is because the MC operations are relatively long and would | |
16 | * block NAPI/interrupt operation. | |
17 | * | |
18 | * Receive event processing: | |
19 | * The event contains the packet's UUID and sequence number, together | |
20 | * with the hardware timestamp. The PTP receive packet queue is searched | |
21 | * for this UUID/sequence number and, if found, put on a pending queue. | |
22 | * Packets not matching are delivered without timestamps (MCDI events will | |
23 | * always arrive after the actual packet). | |
24 | * It is important for the operation of the PTP protocol that the ordering | |
25 | * of packets between the event and general port is maintained. | |
26 | * | |
27 | * Work queue processing: | |
28 | * If work waiting, synchronise host/hardware time | |
29 | * | |
30 | * Transmit: send packet through MC, which returns the transmission time | |
31 | * that is converted to an appropriate timestamp. | |
32 | * | |
33 | * Receive: the packet's reception time is converted to an appropriate | |
34 | * timestamp. | |
35 | */ | |
36 | #include <linux/ip.h> | |
37 | #include <linux/udp.h> | |
38 | #include <linux/time.h> | |
39 | #include <linux/ktime.h> | |
40 | #include <linux/module.h> | |
41 | #include <linux/net_tstamp.h> | |
42 | #include <linux/pps_kernel.h> | |
43 | #include <linux/ptp_clock_kernel.h> | |
44 | #include "net_driver.h" | |
45 | #include "efx.h" | |
46 | #include "mcdi.h" | |
47 | #include "mcdi_pcol.h" | |
48 | #include "io.h" | |
8b8a95a1 | 49 | #include "farch_regs.h" |
7c236c43 SH |
50 | #include "nic.h" |
51 | ||
52 | /* Maximum number of events expected to make up a PTP event */ | |
53 | #define MAX_EVENT_FRAGS 3 | |
54 | ||
55 | /* Maximum delay, ms, to begin synchronisation */ | |
56 | #define MAX_SYNCHRONISE_WAIT_MS 2 | |
57 | ||
58 | /* How long, at most, to spend synchronising */ | |
59 | #define SYNCHRONISE_PERIOD_NS 250000 | |
60 | ||
61 | /* How often to update the shared memory time */ | |
62 | #define SYNCHRONISATION_GRANULARITY_NS 200 | |
63 | ||
64 | /* Minimum permitted length of a (corrected) synchronisation time */ | |
65 | #define MIN_SYNCHRONISATION_NS 120 | |
66 | ||
67 | /* Maximum permitted length of a (corrected) synchronisation time */ | |
68 | #define MAX_SYNCHRONISATION_NS 1000 | |
69 | ||
70 | /* How many (MC) receive events that can be queued */ | |
71 | #define MAX_RECEIVE_EVENTS 8 | |
72 | ||
73 | /* Length of (modified) moving average. */ | |
74 | #define AVERAGE_LENGTH 16 | |
75 | ||
76 | /* How long an unmatched event or packet can be held */ | |
77 | #define PKT_EVENT_LIFETIME_MS 10 | |
78 | ||
79 | /* Offsets into PTP packet for identification. These offsets are from the | |
80 | * start of the IP header, not the MAC header. Note that neither PTP V1 nor | |
81 | * PTP V2 permit the use of IPV4 options. | |
82 | */ | |
83 | #define PTP_DPORT_OFFSET 22 | |
84 | ||
85 | #define PTP_V1_VERSION_LENGTH 2 | |
86 | #define PTP_V1_VERSION_OFFSET 28 | |
87 | ||
88 | #define PTP_V1_UUID_LENGTH 6 | |
89 | #define PTP_V1_UUID_OFFSET 50 | |
90 | ||
91 | #define PTP_V1_SEQUENCE_LENGTH 2 | |
92 | #define PTP_V1_SEQUENCE_OFFSET 58 | |
93 | ||
94 | /* The minimum length of a PTP V1 packet for offsets, etc. to be valid: | |
95 | * includes IP header. | |
96 | */ | |
97 | #define PTP_V1_MIN_LENGTH 64 | |
98 | ||
99 | #define PTP_V2_VERSION_LENGTH 1 | |
100 | #define PTP_V2_VERSION_OFFSET 29 | |
101 | ||
c939a316 LE |
102 | #define PTP_V2_UUID_LENGTH 8 |
103 | #define PTP_V2_UUID_OFFSET 48 | |
104 | ||
7c236c43 SH |
105 | /* Although PTP V2 UUIDs are comprised a ClockIdentity (8) and PortNumber (2), |
106 | * the MC only captures the last six bytes of the clock identity. These values | |
107 | * reflect those, not the ones used in the standard. The standard permits | |
108 | * mapping of V1 UUIDs to V2 UUIDs with these same values. | |
109 | */ | |
110 | #define PTP_V2_MC_UUID_LENGTH 6 | |
111 | #define PTP_V2_MC_UUID_OFFSET 50 | |
112 | ||
113 | #define PTP_V2_SEQUENCE_LENGTH 2 | |
114 | #define PTP_V2_SEQUENCE_OFFSET 58 | |
115 | ||
116 | /* The minimum length of a PTP V2 packet for offsets, etc. to be valid: | |
117 | * includes IP header. | |
118 | */ | |
119 | #define PTP_V2_MIN_LENGTH 63 | |
120 | ||
121 | #define PTP_MIN_LENGTH 63 | |
122 | ||
123 | #define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */ | |
124 | #define PTP_EVENT_PORT 319 | |
125 | #define PTP_GENERAL_PORT 320 | |
126 | ||
127 | /* Annoyingly the format of the version numbers are different between | |
128 | * versions 1 and 2 so it isn't possible to simply look for 1 or 2. | |
129 | */ | |
130 | #define PTP_VERSION_V1 1 | |
131 | ||
132 | #define PTP_VERSION_V2 2 | |
133 | #define PTP_VERSION_V2_MASK 0x0f | |
134 | ||
/* State of a received PTP packet while it waits to be paired with an MC
 * timestamp event.  Stored in the sk_buff cb area via struct efx_ptp_match.
 */
enum ptp_packet_state {
	PTP_PACKET_STATE_UNMATCHED = 0,	/* no timestamp event seen yet */
	PTP_PACKET_STATE_MATCHED,	/* event found; hw timestamp attached */
	PTP_PACKET_STATE_TIMED_OUT,	/* waited too long; deliver without ts */
	PTP_PACKET_STATE_MATCH_UNWANTED	/* packet does not need a timestamp */
};
141 | ||
142 | /* NIC synchronised with single word of time only comprising | |
143 | * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds. | |
144 | */ | |
145 | #define MC_NANOSECOND_BITS 30 | |
146 | #define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1) | |
147 | #define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1) | |
148 | ||
149 | /* Maximum parts-per-billion adjustment that is acceptable */ | |
150 | #define MAX_PPB 1000000 | |
151 | ||
152 | /* Number of bits required to hold the above */ | |
153 | #define MAX_PPB_BITS 20 | |
154 | ||
155 | /* Number of extra bits allowed when calculating fractional ns. | |
156 | * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should | |
157 | * be less than 63. | |
158 | */ | |
159 | #define PPB_EXTRA_BITS 2 | |
160 | ||
161 | /* Precalculate scale word to avoid long long division at runtime */ | |
162 | #define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\ | |
163 | MAX_PPB_BITS)) / 1000000000LL) | |
164 | ||
165 | #define PTP_SYNC_ATTEMPTS 4 | |
166 | ||
/**
 * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
 * @words: UUID and (partial) sequence number
 * @expiry: Time (jiffies) after which the packet should be delivered
 *	irrespective of event arrival.
 * @state: The state of the packet - whether it is ready for processing or
 *	whether that is of no interest.
 */
struct efx_ptp_match {
	u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
	unsigned long expiry;
	enum ptp_packet_state state;
};
180 | ||
/**
 * struct efx_ptp_event_rx - A PTP receive event (from MC)
 * @link: List node, on either evt_list or evt_free_list
 * @seq0: First part of (PTP) UUID
 * @seq1: Second part of (PTP) UUID and sequence number
 * @hwtimestamp: Event timestamp
 * @expiry: Time (jiffies) after which an unmatched event is discarded
 */
struct efx_ptp_event_rx {
	struct list_head link;
	u32 seq0;
	u32 seq1;
	ktime_t hwtimestamp;
	unsigned long expiry;
};
194 | ||
/**
 * struct efx_ptp_timeset - Synchronisation between host and MC
 * @host_start: Host time immediately before hardware timestamp taken
 * @seconds: Hardware timestamp, seconds
 * @nanoseconds: Hardware timestamp, nanoseconds
 * @host_end: Host time immediately after hardware timestamp taken
 * @waitns: Number of nanoseconds between hardware timestamp being read and
 *	host end time being seen
 * @window: Difference of host_end and host_start
 */
struct efx_ptp_timeset {
	u32 host_start;
	u32 seconds;
	u32 nanoseconds;
	u32 host_end;
	u32 waitns;
	u32 window;	/* Derived: end - start, allowing for wrap */
};
214 | ||
/**
 * struct efx_ptp_data - Precision Time Protocol (PTP) state
 * @efx: The NIC context
 * @channel: The PTP channel (Siena only)
 * @rxq: Receive queue (awaiting timestamps)
 * @txq: Transmit queue
 * @evt_list: List of MC receive events awaiting packets
 * @evt_free_list: List of free events
 * @evt_lock: Lock for manipulating evt_list and evt_free_list
 * @evt_overflow: Boolean indicating that event list has overflowed
 * @rx_evts: Instantiated events (on evt_list and evt_free_list)
 * @workwq: Work queue for processing pending PTP operations
 * @work: Work task
 * @reset_required: A serious error has occurred and the PTP task needs to be
 *                  reset (disable, enable).
 * @rxfilter_event: Receive filter for PTP event port packets
 * @rxfilter_general: Receive filter for PTP general port packets
 * @rxfilter_installed: Whether the two filters above are currently installed
 * @config: Current timestamp configuration
 * @enabled: PTP operation enabled
 * @mode: Mode in which PTP operating (PTP version)
 * @evt_frags: Partly assembled PTP events
 * @evt_frag_idx: Current fragment number
 * @evt_code: Last event code
 * @start: Address at which MC indicates ready for synchronisation
 * @host_time_pps: Host time at last PPS
 * @last_sync_ns: Last number of nanoseconds between readings when synchronising
 * @base_sync_ns: Number of nanoseconds for last synchronisation.
 * @base_sync_valid: Whether base_sync_ns is valid.
 * @current_adjfreq: Current ppb adjustment.
 * @phc_clock: Pointer to registered phc device
 * @phc_clock_info: Registration structure for phc device
 * @pps_work: pps work task for handling pps events
 * @pps_workwq: pps work queue
 * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
 * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
 *         allocations in main data path).
 * @timeset: Last set of synchronisation statistics.
 */
struct efx_ptp_data {
	struct efx_nic *efx;
	struct efx_channel *channel;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct list_head evt_list;
	struct list_head evt_free_list;
	spinlock_t evt_lock;
	bool evt_overflow;
	struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
	struct workqueue_struct *workwq;
	struct work_struct work;
	bool reset_required;
	u32 rxfilter_event;
	u32 rxfilter_general;
	bool rxfilter_installed;
	struct hwtstamp_config config;
	bool enabled;
	unsigned int mode;
	efx_qword_t evt_frags[MAX_EVENT_FRAGS];
	int evt_frag_idx;
	int evt_code;
	struct efx_buffer start;
	struct pps_event_time host_time_pps;
	unsigned last_sync_ns;
	unsigned base_sync_ns;
	bool base_sync_valid;
	s64 current_adjfreq;
	struct ptp_clock *phc_clock;
	struct ptp_clock_info phc_clock_info;
	struct work_struct pps_work;
	struct workqueue_struct *pps_workwq;
	bool nic_ts_enabled;
	MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
	struct efx_ptp_timeset
		timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
};
305 | ||
306 | static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta); | |
307 | static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta); | |
308 | static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts); | |
309 | static int efx_phc_settime(struct ptp_clock_info *ptp, | |
310 | const struct timespec *e_ts); | |
311 | static int efx_phc_enable(struct ptp_clock_info *ptp, | |
312 | struct ptp_clock_request *request, int on); | |
313 | ||
/* Enable MCDI PTP support.
 *
 * Returns 0 on success (or if PTP was already enabled) or a negative MCDI
 * error code.
 */
static int efx_ptp_enable(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
	int rc;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	/* Use the dedicated PTP channel if there is one, else queue 0 */
	MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
		       efx->ptp_data->channel ?
		       efx->ptp_data->channel->channel : 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), NULL);
	/* -EALREADY means PTP was already enabled; treat as success */
	rc = (rc == -EALREADY) ? 0 : rc;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_PTP,
				       MC_CMD_PTP_IN_ENABLE_LEN,
				       outbuf, sizeof(outbuf), rc);
	return rc;
}
337 | ||
/* Disable MCDI PTP support.
 *
 * Note that this function should never rely on the presence of ptp_data -
 * may be called before that exists.
 *
 * Returns 0 on success (or if PTP was already disabled) or a negative MCDI
 * error code.
 */
static int efx_ptp_disable(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
	int rc;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), NULL);
	/* -EALREADY means PTP was already disabled; treat as success */
	rc = (rc == -EALREADY) ? 0 : rc;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_PTP,
				       MC_CMD_PTP_IN_DISABLE_LEN,
				       outbuf, sizeof(outbuf), rc);
	return rc;
}
360 | ||
361 | static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q) | |
362 | { | |
363 | struct sk_buff *skb; | |
364 | ||
365 | while ((skb = skb_dequeue(q))) { | |
366 | local_bh_disable(); | |
367 | netif_receive_skb(skb); | |
368 | local_bh_enable(); | |
369 | } | |
370 | } | |
371 | ||
372 | static void efx_ptp_handle_no_channel(struct efx_nic *efx) | |
373 | { | |
374 | netif_err(efx, drv, efx->net_dev, | |
375 | "ERROR: PTP requires MSI-X and 1 additional interrupt" | |
376 | "vector. PTP disabled\n"); | |
377 | } | |
378 | ||
379 | /* Repeatedly send the host time to the MC which will capture the hardware | |
380 | * time. | |
381 | */ | |
382 | static void efx_ptp_send_times(struct efx_nic *efx, | |
383 | struct pps_event_time *last_time) | |
384 | { | |
385 | struct pps_event_time now; | |
386 | struct timespec limit; | |
387 | struct efx_ptp_data *ptp = efx->ptp_data; | |
388 | struct timespec start; | |
389 | int *mc_running = ptp->start.addr; | |
390 | ||
391 | pps_get_ts(&now); | |
392 | start = now.ts_real; | |
393 | limit = now.ts_real; | |
394 | timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS); | |
395 | ||
396 | /* Write host time for specified period or until MC is done */ | |
397 | while ((timespec_compare(&now.ts_real, &limit) < 0) && | |
398 | ACCESS_ONCE(*mc_running)) { | |
399 | struct timespec update_time; | |
400 | unsigned int host_time; | |
401 | ||
402 | /* Don't update continuously to avoid saturating the PCIe bus */ | |
403 | update_time = now.ts_real; | |
404 | timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); | |
405 | do { | |
406 | pps_get_ts(&now); | |
407 | } while ((timespec_compare(&now.ts_real, &update_time) < 0) && | |
408 | ACCESS_ONCE(*mc_running)); | |
409 | ||
410 | /* Synchronise NIC with single word of time only */ | |
411 | host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | | |
412 | now.ts_real.tv_nsec); | |
413 | /* Update host time in NIC memory */ | |
977a5d5d | 414 | efx->type->ptp_write_host_time(efx, host_time); |
7c236c43 SH |
415 | } |
416 | *last_time = now; | |
417 | } | |
418 | ||
419 | /* Read a timeset from the MC's results and partial process. */ | |
c5bb0e98 BH |
420 | static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data), |
421 | struct efx_ptp_timeset *timeset) | |
7c236c43 SH |
422 | { |
423 | unsigned start_ns, end_ns; | |
424 | ||
425 | timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART); | |
426 | timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS); | |
427 | timeset->nanoseconds = MCDI_DWORD(data, | |
428 | PTP_OUT_SYNCHRONIZE_NANOSECONDS); | |
429 | timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND), | |
430 | timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS); | |
431 | ||
432 | /* Ignore seconds */ | |
433 | start_ns = timeset->host_start & MC_NANOSECOND_MASK; | |
434 | end_ns = timeset->host_end & MC_NANOSECOND_MASK; | |
435 | /* Allow for rollover */ | |
436 | if (end_ns < start_ns) | |
437 | end_ns += NSEC_PER_SEC; | |
438 | /* Determine duration of operation */ | |
439 | timeset->window = end_ns - start_ns; | |
440 | } | |
441 | ||
/* Process times received from MC.
 *
 * Extract times from returned results, and establish the minimum value
 * seen. The minimum value represents the "best" possible time and events
 * too much greater than this are rejected - the machine is, perhaps, too
 * busy. A number of readings are taken so that, hopefully, at least one good
 * synchronisation will be seen in the results.
 *
 * Returns 0 on success, or -EAGAIN if no usable synchronisation was found.
 */
static int
efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
		      size_t response_length,
		      const struct pps_event_time *last_time)
{
	unsigned number_readings =
		MCDI_VAR_ARRAY_LEN(response_length,
				   PTP_OUT_SYNCHRONIZE_TIMESET);
	unsigned i;
	unsigned total;
	unsigned ngood = 0;
	unsigned last_good = 0;
	struct efx_ptp_data *ptp = efx->ptp_data;
	u32 last_sec;
	u32 start_sec;
	struct timespec delta;

	if (number_readings == 0)
		return -EAGAIN;

	/* Convert each raw timeset that appears in the response */
	for (i = 0; i < number_readings; i++) {
		efx_ptp_read_timeset(
			MCDI_ARRAY_STRUCT_PTR(synch_buf,
					      PTP_OUT_SYNCHRONIZE_TIMESET, i),
			&ptp->timeset[i]);
	}

	/* Find the last good host-MC synchronization result. The MC times
	 * when it finishes reading the host time so the corrected window time
	 * should be fairly constant for a given platform.
	 */
	total = 0;
	for (i = 0; i < number_readings; i++)
		if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
			unsigned win;

			win = ptp->timeset[i].window - ptp->timeset[i].waitns;
			if (win >= MIN_SYNCHRONISATION_NS &&
			    win < MAX_SYNCHRONISATION_NS) {
				total += ptp->timeset[i].window;
				ngood++;
				last_good = i;
			}
		}

	if (ngood == 0) {
		netif_warn(efx, drv, efx->net_dev,
			   "PTP no suitable synchronisations %dns\n",
			   ptp->base_sync_ns);
		return -EAGAIN;
	}

	/* Average minimum this synchronisation */
	ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
	if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
		ptp->base_sync_valid = true;
		ptp->base_sync_ns = ptp->last_sync_ns;
	}

	/* Calculate delay from actual PPS to last_time */
	delta.tv_nsec =
		ptp->timeset[last_good].nanoseconds +
		last_time->ts_real.tv_nsec -
		(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);

	/* It is possible that the seconds rolled over between taking
	 * the start reading and the last value written by the host. The
	 * timescales are such that a gap of more than one second is never
	 * expected.
	 */
	start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
	last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
	if (start_sec != last_sec) {
		if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
			netif_warn(efx, hw, efx->net_dev,
				   "PTP bad synchronisation seconds\n");
			return -EAGAIN;
		} else {
			delta.tv_sec = 1;
		}
	} else {
		delta.tv_sec = 0;
	}

	ptp->host_time_pps = *last_time;
	pps_sub_ts(&ptp->host_time_pps, delta);

	return 0;
}
542 | ||
/* Synchronize times between the host and the MC.
 *
 * Issues an MCDI SYNCHRONIZE, waits for the MC to signal readiness via the
 * shared 'start' word, streams host times to it, then collects and processes
 * the resulting timesets.
 */
static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
	size_t response_length;
	int rc;
	unsigned long timeout;
	struct pps_event_time last_time = {};
	unsigned int loops = 0;	/* NOTE(review): counted but never read */
	int *start = ptp->start.addr;

	MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
	MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
		       num_readings);
	MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
		       ptp->start.dma_addr);

	/* Clear flag that signals MC ready */
	ACCESS_ONCE(*start) = 0;
	rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
				MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
	EFX_BUG_ON_PARANOID(rc);

	/* Wait for start from MCDI (or timeout) */
	timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
	while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
		udelay(20);	/* Usually start MCDI execution quickly */
		loops++;
	}

	/* MC signalled ready: stream host times until it is done */
	if (ACCESS_ONCE(*start))
		efx_ptp_send_times(efx, &last_time);

	/* Collect results */
	rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP,
				 MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
				 synch_buf, sizeof(synch_buf),
				 &response_length);
	if (rc == 0)
		rc = efx_ptp_process_times(efx, synch_buf, response_length,
					   &last_time);

	return rc;
}
589 | ||
590 | /* Transmit a PTP packet, via the MCDI interface, to the wire. */ | |
591 | static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) | |
592 | { | |
c5bb0e98 | 593 | struct efx_ptp_data *ptp_data = efx->ptp_data; |
7c236c43 SH |
594 | struct skb_shared_hwtstamps timestamps; |
595 | int rc = -EIO; | |
59cfc479 | 596 | MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN); |
9528b921 | 597 | size_t len; |
7c236c43 | 598 | |
c5bb0e98 | 599 | MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT); |
c1d828bd | 600 | MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0); |
c5bb0e98 | 601 | MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len); |
7c236c43 SH |
602 | if (skb_shinfo(skb)->nr_frags != 0) { |
603 | rc = skb_linearize(skb); | |
604 | if (rc != 0) | |
605 | goto fail; | |
606 | } | |
607 | ||
608 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
609 | rc = skb_checksum_help(skb); | |
610 | if (rc != 0) | |
611 | goto fail; | |
612 | } | |
613 | skb_copy_from_linear_data(skb, | |
c5bb0e98 BH |
614 | MCDI_PTR(ptp_data->txbuf, |
615 | PTP_IN_TRANSMIT_PACKET), | |
9528b921 BH |
616 | skb->len); |
617 | rc = efx_mcdi_rpc(efx, MC_CMD_PTP, | |
618 | ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), | |
619 | txtime, sizeof(txtime), &len); | |
7c236c43 SH |
620 | if (rc != 0) |
621 | goto fail; | |
622 | ||
623 | memset(×tamps, 0, sizeof(timestamps)); | |
624 | timestamps.hwtstamp = ktime_set( | |
625 | MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS), | |
626 | MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS)); | |
627 | ||
628 | skb_tstamp_tx(skb, ×tamps); | |
629 | ||
630 | rc = 0; | |
631 | ||
632 | fail: | |
633 | dev_kfree_skb(skb); | |
634 | ||
635 | return rc; | |
636 | } | |
637 | ||
/* Return any MC receive events whose expiry time has passed to the free
 * list, warning for each one dropped.
 */
static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	struct list_head *cursor;
	struct list_head *next;

	/* Drop time-expired events */
	spin_lock_bh(&ptp->evt_lock);
	if (!list_empty(&ptp->evt_list)) {
		list_for_each_safe(cursor, next, &ptp->evt_list) {
			struct efx_ptp_event_rx *evt;

			evt = list_entry(cursor, struct efx_ptp_event_rx,
					 link);
			if (time_after(jiffies, evt->expiry)) {
				list_move(&evt->link, &ptp->evt_free_list);
				netif_warn(efx, hw, efx->net_dev,
					   "PTP rx event dropped\n");
			}
		}
	}
	/* If the event overflow flag is set and the event list is now empty
	 * clear the flag to re-enable the overflow warning message.
	 */
	if (ptp->evt_overflow && list_empty(&ptp->evt_list))
		ptp->evt_overflow = false;
	spin_unlock_bh(&ptp->evt_lock);
}
666 | ||
/* Try to pair a received PTP packet with a queued MC timestamp event.
 *
 * On a match the hardware timestamp is copied into the skb and the event is
 * returned to the free list.  Returns the resulting packet state
 * (PTP_PACKET_STATE_MATCHED or PTP_PACKET_STATE_UNMATCHED).
 */
static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
					      struct sk_buff *skb)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	bool evts_waiting;
	struct list_head *cursor;
	struct list_head *next;
	struct efx_ptp_match *match;
	enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;

	/* Cheap emptiness probe first, to avoid the full list walk when no
	 * events are pending.
	 */
	spin_lock_bh(&ptp->evt_lock);
	evts_waiting = !list_empty(&ptp->evt_list);
	spin_unlock_bh(&ptp->evt_lock);

	if (!evts_waiting)
		return PTP_PACKET_STATE_UNMATCHED;

	match = (struct efx_ptp_match *)skb->cb;
	/* Look for a matching timestamp in the event queue */
	spin_lock_bh(&ptp->evt_lock);
	list_for_each_safe(cursor, next, &ptp->evt_list) {
		struct efx_ptp_event_rx *evt;

		evt = list_entry(cursor, struct efx_ptp_event_rx, link);
		if ((evt->seq0 == match->words[0]) &&
		    (evt->seq1 == match->words[1])) {
			struct skb_shared_hwtstamps *timestamps;

			/* Match - add in hardware timestamp */
			timestamps = skb_hwtstamps(skb);
			timestamps->hwtstamp = evt->hwtimestamp;

			match->state = PTP_PACKET_STATE_MATCHED;
			rc = PTP_PACKET_STATE_MATCHED;
			list_move(&evt->link, &ptp->evt_free_list);
			break;
		}
	}
	/* If the event overflow flag is set and the event list is now empty
	 * clear the flag to re-enable the overflow warning message.
	 */
	if (ptp->evt_overflow && list_empty(&ptp->evt_list))
		ptp->evt_overflow = false;
	spin_unlock_bh(&ptp->evt_lock);

	return rc;
}
714 | ||
/* Process any queued receive events and corresponding packets
 *
 * q is returned with all the packets that are ready for delivery.
 * true is returned if at least one of those packets requires
 * synchronisation.
 */
static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	bool rc = false;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ptp->rxq))) {
		struct efx_ptp_match *match;

		match = (struct efx_ptp_match *)skb->cb;
		if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
			/* No timestamp wanted: deliver immediately */
			__skb_queue_tail(q, skb);
		} else if (efx_ptp_match_rx(efx, skb) ==
			   PTP_PACKET_STATE_MATCHED) {
			rc = true;
			__skb_queue_tail(q, skb);
		} else if (time_after(jiffies, match->expiry)) {
			/* Waited too long for its event: deliver anyway */
			match->state = PTP_PACKET_STATE_TIMED_OUT;
			if (net_ratelimit())
				netif_warn(efx, rx_err, efx->net_dev,
					   "PTP packet - no timestamp seen\n");
			__skb_queue_tail(q, skb);
		} else {
			/* Replace unprocessed entry and stop */
			skb_queue_head(&ptp->rxq, skb);
			break;
		}
	}

	return rc;
}
752 | ||
/* Complete processing of a received packet by handing it to the stack */
static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
{
	/* netif_receive_skb() must run with bottom halves disabled */
	local_bh_disable();
	netif_receive_skb(skb);
	local_bh_enable();
}
760 | ||
62a1c703 BH |
761 | static void efx_ptp_remove_multicast_filters(struct efx_nic *efx) |
762 | { | |
763 | struct efx_ptp_data *ptp = efx->ptp_data; | |
764 | ||
765 | if (ptp->rxfilter_installed) { | |
766 | efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, | |
767 | ptp->rxfilter_general); | |
768 | efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, | |
769 | ptp->rxfilter_event); | |
770 | ptp->rxfilter_installed = false; | |
771 | } | |
772 | } | |
773 | ||
/* Install receive filters for the PTP event and general multicast ports.
 *
 * No-op (returning 0) when there is no PTP channel or the filters are
 * already installed.  On failure of the second insertion the first filter
 * is removed again, so the state is all-or-nothing.
 */
static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	struct efx_filter_spec rxfilter;
	int rc;

	if (!ptp->channel || ptp->rxfilter_installed)
		return 0;

	/* Must filter on both event and general ports to ensure
	 * that there is no packet re-ordering.
	 */
	efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
			   efx_rx_queue_index(
				   efx_channel_get_rx_queue(ptp->channel)));
	rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
				       htonl(PTP_ADDRESS),
				       htons(PTP_EVENT_PORT));
	if (rc != 0)
		return rc;

	rc = efx_filter_insert_filter(efx, &rxfilter, true);
	if (rc < 0)
		return rc;
	ptp->rxfilter_event = rc;

	efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
			   efx_rx_queue_index(
				   efx_channel_get_rx_queue(ptp->channel)));
	rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
				       htonl(PTP_ADDRESS),
				       htons(PTP_GENERAL_PORT));
	if (rc != 0)
		goto fail;

	rc = efx_filter_insert_filter(efx, &rxfilter, true);
	if (rc < 0)
		goto fail;
	ptp->rxfilter_general = rc;

	ptp->rxfilter_installed = true;
	return 0;

fail:
	/* Unwind the event filter inserted above */
	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
				  ptp->rxfilter_event);
	return rc;
}
822 | ||
823 | static int efx_ptp_start(struct efx_nic *efx) | |
824 | { | |
825 | struct efx_ptp_data *ptp = efx->ptp_data; | |
826 | int rc; | |
827 | ||
828 | ptp->reset_required = false; | |
829 | ||
830 | rc = efx_ptp_insert_multicast_filters(efx); | |
831 | if (rc) | |
832 | return rc; | |
833 | ||
7c236c43 SH |
834 | rc = efx_ptp_enable(efx); |
835 | if (rc != 0) | |
62a1c703 | 836 | goto fail; |
7c236c43 SH |
837 | |
838 | ptp->evt_frag_idx = 0; | |
839 | ptp->current_adjfreq = 0; | |
7c236c43 SH |
840 | |
841 | return 0; | |
842 | ||
7c236c43 | 843 | fail: |
62a1c703 | 844 | efx_ptp_remove_multicast_filters(efx); |
7c236c43 SH |
845 | return rc; |
846 | } | |
847 | ||
/* Tear PTP down: disable timestamping on the MC, remove the RX filters
 * and flush all queued packets and pending receive events.
 *
 * Returns the result of efx_ptp_disable(); the local cleanup itself
 * cannot fail.  Safe to call when PTP was never probed.
 */
static int efx_ptp_stop(struct efx_nic *efx)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	struct list_head *cursor;
	struct list_head *next;
	int rc;

	if (ptp == NULL)
		return 0;

	rc = efx_ptp_disable(efx);

	efx_ptp_remove_multicast_filters(efx);

	/* Make sure RX packets are really delivered */
	efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
	skb_queue_purge(&efx->ptp_data->txq);

	/* Drop any pending receive events */
	spin_lock_bh(&efx->ptp_data->evt_lock);
	list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
		list_move(cursor, &efx->ptp_data->evt_free_list);
	}
	/* Event list is now empty: re-arm the overflow warning */
	ptp->evt_overflow = false;
	spin_unlock_bh(&efx->ptp_data->evt_lock);

	return rc;
}
876 | ||
2ea4dc28 AR |
877 | static int efx_ptp_restart(struct efx_nic *efx) |
878 | { | |
879 | if (efx->ptp_data && efx->ptp_data->enabled) | |
880 | return efx_ptp_start(efx); | |
881 | return 0; | |
882 | } | |
883 | ||
7c236c43 SH |
884 | static void efx_ptp_pps_worker(struct work_struct *work) |
885 | { | |
886 | struct efx_ptp_data *ptp = | |
887 | container_of(work, struct efx_ptp_data, pps_work); | |
ac36baf8 | 888 | struct efx_nic *efx = ptp->efx; |
7c236c43 SH |
889 | struct ptp_clock_event ptp_evt; |
890 | ||
891 | if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS)) | |
892 | return; | |
893 | ||
894 | ptp_evt.type = PTP_CLOCK_PPSUSR; | |
895 | ptp_evt.pps_times = ptp->host_time_pps; | |
896 | ptp_clock_event(ptp->phc_clock, &ptp_evt); | |
897 | } | |
898 | ||
899 | /* Process any pending transmissions and timestamp any received packets. | |
900 | */ | |
901 | static void efx_ptp_worker(struct work_struct *work) | |
902 | { | |
903 | struct efx_ptp_data *ptp_data = | |
904 | container_of(work, struct efx_ptp_data, work); | |
ac36baf8 | 905 | struct efx_nic *efx = ptp_data->efx; |
7c236c43 SH |
906 | struct sk_buff *skb; |
907 | struct sk_buff_head tempq; | |
908 | ||
909 | if (ptp_data->reset_required) { | |
910 | efx_ptp_stop(efx); | |
911 | efx_ptp_start(efx); | |
912 | return; | |
913 | } | |
914 | ||
915 | efx_ptp_drop_time_expired_events(efx); | |
916 | ||
917 | __skb_queue_head_init(&tempq); | |
918 | if (efx_ptp_process_events(efx, &tempq) || | |
919 | !skb_queue_empty(&ptp_data->txq)) { | |
920 | ||
921 | while ((skb = skb_dequeue(&ptp_data->txq))) | |
922 | efx_ptp_xmit_skb(efx, skb); | |
923 | } | |
924 | ||
925 | while ((skb = __skb_dequeue(&tempq))) | |
926 | efx_ptp_process_rx(efx, skb); | |
927 | } | |
928 | ||
/* Template for the PHC (PTP hardware clock) registered with the kernel.
 * Copied into efx_ptp_data at probe time (see efx_ptp_probe) so that a
 * per-device copy is handed to ptp_clock_register().
 */
static const struct ptp_clock_info efx_phc_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "sfc",
	.max_adj	= MAX_PPB,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 1,	/* PPS events are supported */
	.adjfreq	= efx_phc_adjfreq,
	.adjtime	= efx_phc_adjtime,
	.gettime	= efx_phc_gettime,
	.settime	= efx_phc_settime,
	.enable		= efx_phc_enable,
};
943 | ||
/* Initialise PTP state.
 *
 * Allocates efx->ptp_data, the MC synchronisation buffer, the packet
 * queues and work queues, and registers a PTP hardware clock.  On
 * failure everything allocated so far is unwound and efx->ptp_data is
 * left NULL.
 */
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
{
	struct efx_ptp_data *ptp;
	int rc = 0;
	unsigned int pos;

	ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
	efx->ptp_data = ptp;
	if (!efx->ptp_data)
		return -ENOMEM;

	ptp->efx = efx;
	ptp->channel = channel;

	/* DMA buffer used to hand synchronisation start markers to the MC */
	rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
	if (rc != 0)
		goto fail1;

	skb_queue_head_init(&ptp->rxq);
	skb_queue_head_init(&ptp->txq);
	ptp->workwq = create_singlethread_workqueue("sfc_ptp");
	if (!ptp->workwq) {
		rc = -ENOMEM;
		goto fail2;
	}

	INIT_WORK(&ptp->work, efx_ptp_worker);
	/* Timestamping starts disabled until SIOCSHWTSTAMP requests it */
	ptp->config.flags = 0;
	ptp->config.tx_type = HWTSTAMP_TX_OFF;
	ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
	INIT_LIST_HEAD(&ptp->evt_list);
	INIT_LIST_HEAD(&ptp->evt_free_list);
	spin_lock_init(&ptp->evt_lock);
	/* All receive-event slots start out on the free list */
	for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
		list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
	ptp->evt_overflow = false;

	ptp->phc_clock_info = efx_phc_clock_info;
	ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
					    &efx->pci_dev->dev);
	if (IS_ERR(ptp->phc_clock)) {
		rc = PTR_ERR(ptp->phc_clock);
		goto fail3;
	}

	INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
	ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
	if (!ptp->pps_workwq) {
		rc = -ENOMEM;
		goto fail4;
	}
	ptp->nic_ts_enabled = false;

	return 0;
fail4:
	ptp_clock_unregister(efx->ptp_data->phc_clock);

fail3:
	destroy_workqueue(efx->ptp_data->workwq);

fail2:
	efx_nic_free_buffer(efx, &ptp->start);

fail1:
	kfree(efx->ptp_data);
	efx->ptp_data = NULL;

	return rc;
}
1014 | ||
/* Initialise PTP channel.
 *
 * Setting core_index to zero causes the queue to be initialised and doesn't
 * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
 */
static int efx_ptp_probe_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	/* Timestamp events must not be delayed by interrupt moderation */
	channel->irq_moderation = 0;
	channel->rx_queue.core_index = 0;

	return efx_ptp_probe(efx, channel);
}
1029 | ||
1030 | void efx_ptp_remove(struct efx_nic *efx) | |
1031 | { | |
7c236c43 SH |
1032 | if (!efx->ptp_data) |
1033 | return; | |
1034 | ||
ac36baf8 | 1035 | (void)efx_ptp_disable(efx); |
7c236c43 SH |
1036 | |
1037 | cancel_work_sync(&efx->ptp_data->work); | |
1038 | cancel_work_sync(&efx->ptp_data->pps_work); | |
1039 | ||
1040 | skb_queue_purge(&efx->ptp_data->rxq); | |
1041 | skb_queue_purge(&efx->ptp_data->txq); | |
1042 | ||
1043 | ptp_clock_unregister(efx->ptp_data->phc_clock); | |
1044 | ||
1045 | destroy_workqueue(efx->ptp_data->workwq); | |
1046 | destroy_workqueue(efx->ptp_data->pps_workwq); | |
1047 | ||
1048 | efx_nic_free_buffer(efx, &efx->ptp_data->start); | |
1049 | kfree(efx->ptp_data); | |
1050 | } | |
1051 | ||
/* Channel-type hook: tear down PTP state when the PTP channel is removed. */
static void efx_ptp_remove_channel(struct efx_channel *channel)
{
	efx_ptp_remove(channel->efx);
}
1056 | ||
/* Channel-type hook: name the PTP channel after the interface. */
static void efx_ptp_get_channel_name(struct efx_channel *channel,
				     char *buf, size_t len)
{
	snprintf(buf, len, "%s-ptp", channel->efx->name);
}
1062 | ||
1063 | /* Determine whether this packet should be processed by the PTP module | |
1064 | * or transmitted conventionally. | |
1065 | */ | |
1066 | bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) | |
1067 | { | |
1068 | return efx->ptp_data && | |
1069 | efx->ptp_data->enabled && | |
1070 | skb->len >= PTP_MIN_LENGTH && | |
1071 | skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && | |
1072 | likely(skb->protocol == htons(ETH_P_IP)) && | |
e5a498e9 BH |
1073 | skb_transport_header_was_set(skb) && |
1074 | skb_network_header_len(skb) >= sizeof(struct iphdr) && | |
7c236c43 | 1075 | ip_hdr(skb)->protocol == IPPROTO_UDP && |
e5a498e9 BH |
1076 | skb_headlen(skb) >= |
1077 | skb_transport_offset(skb) + sizeof(struct udphdr) && | |
7c236c43 SH |
1078 | udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); |
1079 | } | |
1080 | ||
/* Receive a PTP packet.  Packets are queued until the arrival of
 * the receive timestamp from the MC - this will probably occur after the
 * packet arrival because of the processing in the MC.
 *
 * Returns false if the packet should instead be delivered through the
 * normal RX path (wrong version / too short), true if it was queued.
 */
static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
{
	struct efx_nic *efx = channel->efx;
	struct efx_ptp_data *ptp = efx->ptp_data;
	struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
	u8 *match_data_012, *match_data_345;
	unsigned int version;

	/* Packets that never get a timestamp event are timed out after
	 * this expiry passes (see the worker's event matching).
	 */
	match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);

	/* Correct version? */
	if (ptp->mode == MC_CMD_PTP_MODE_V1) {
		if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
			return false;
		}
		version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
		if (version != PTP_VERSION_V1) {
			return false;
		}

		/* PTP V1 uses all six bytes of the UUID to match the packet
		 * to the timestamp
		 */
		match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
		match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
	} else {
		if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
			return false;
		}
		version = skb->data[PTP_V2_VERSION_OFFSET];
		if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
			return false;
		}

		/* The original V2 implementation uses bytes 2-7 of
		 * the UUID to match the packet to the timestamp. This
		 * discards two of the bytes of the MAC address used
		 * to create the UUID (SF bug 33070). The PTP V2
		 * enhanced mode fixes this issue and uses bytes 0-2
		 * and byte 5-7 of the UUID.
		 */
		match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
		if (ptp->mode == MC_CMD_PTP_MODE_V2) {
			match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
		} else {
			match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
			BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
		}
	}

	/* Does this packet require timestamping? */
	if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
		struct skb_shared_hwtstamps *timestamps;

		match->state = PTP_PACKET_STATE_UNMATCHED;

		/* Clear all timestamps held: filled in later */
		timestamps = skb_hwtstamps(skb);
		memset(timestamps, 0, sizeof(*timestamps));

		/* We expect the sequence number to be in the same position in
		 * the packet for PTP V1 and V2
		 */
		BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
		BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);

		/* Extract UUID/Sequence information */
		match->words[0] = (match_data_012[0]         |
				   (match_data_012[1] << 8)  |
				   (match_data_012[2] << 16) |
				   (match_data_345[0] << 24));
		match->words[1] = (match_data_345[1]         |
				   (match_data_345[2] << 8)  |
				   (skb->data[PTP_V1_SEQUENCE_OFFSET +
					      PTP_V1_SEQUENCE_LENGTH - 1] <<
				    16));
	} else {
		match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
	}

	/* Queue for the worker, which pairs the packet with its timestamp
	 * event (or times it out).
	 */
	skb_queue_tail(&ptp->rxq, skb);
	queue_work(ptp->workwq, &ptp->work);

	return true;
}
1170 | ||
1171 | /* Transmit a PTP packet. This has to be transmitted by the MC | |
1172 | * itself, through an MCDI call. MCDI calls aren't permitted | |
1173 | * in the transmit path so defer the actual transmission to a suitable worker. | |
1174 | */ | |
1175 | int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) | |
1176 | { | |
1177 | struct efx_ptp_data *ptp = efx->ptp_data; | |
1178 | ||
1179 | skb_queue_tail(&ptp->txq, skb); | |
1180 | ||
1181 | if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) && | |
1182 | (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM)) | |
1183 | efx_xmit_hwtstamp_pending(skb); | |
1184 | queue_work(ptp->workwq, &ptp->work); | |
1185 | ||
1186 | return NETDEV_TX_OK; | |
1187 | } | |
1188 | ||
/* Enable or disable PTP, switching the MC timestamping mode if required.
 *
 * A mode change while enabled requires a full stop/start cycle, and a
 * newly started PTP must achieve baseline host/NIC clock synchronisation
 * before it is considered enabled.
 */
static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
			       unsigned int new_mode)
{
	/* No-op unless the enable state, or (when enabling) the mode, changes */
	if ((enable_wanted != efx->ptp_data->enabled) ||
	    (enable_wanted && (efx->ptp_data->mode != new_mode))) {
		int rc = 0;

		if (enable_wanted) {
			/* Change of mode requires disable */
			if (efx->ptp_data->enabled &&
			    (efx->ptp_data->mode != new_mode)) {
				efx->ptp_data->enabled = false;
				rc = efx_ptp_stop(efx);
				if (rc != 0)
					return rc;
			}

			/* Set new operating mode and establish
			 * baseline synchronisation, which must
			 * succeed.
			 */
			efx->ptp_data->mode = new_mode;
			if (netif_running(efx->net_dev))
				rc = efx_ptp_start(efx);
			if (rc == 0) {
				rc = efx_ptp_synchronize(efx,
							 PTP_SYNC_ATTEMPTS * 2);
				if (rc != 0)
					efx_ptp_stop(efx);
			}
		} else {
			rc = efx_ptp_stop(efx);
		}

		if (rc != 0)
			return rc;

		efx->ptp_data->enabled = enable_wanted;
	}

	return 0;
}
1231 | ||
1232 | static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) | |
1233 | { | |
1234 | bool enable_wanted = false; | |
1235 | unsigned int new_mode; | |
1236 | int rc; | |
1237 | ||
1238 | if (init->flags) | |
1239 | return -EINVAL; | |
1240 | ||
1241 | if ((init->tx_type != HWTSTAMP_TX_OFF) && | |
1242 | (init->tx_type != HWTSTAMP_TX_ON)) | |
1243 | return -ERANGE; | |
1244 | ||
1245 | new_mode = efx->ptp_data->mode; | |
1246 | /* Determine whether any PTP HW operations are required */ | |
1247 | switch (init->rx_filter) { | |
1248 | case HWTSTAMP_FILTER_NONE: | |
1249 | break; | |
1250 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | |
1251 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | |
1252 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | |
1253 | init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; | |
1254 | new_mode = MC_CMD_PTP_MODE_V1; | |
1255 | enable_wanted = true; | |
1256 | break; | |
1257 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | |
1258 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | |
1259 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | |
1260 | /* Although these three are accepted only IPV4 packets will be | |
1261 | * timestamped | |
1262 | */ | |
1263 | init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; | |
c939a316 | 1264 | new_mode = MC_CMD_PTP_MODE_V2_ENHANCED; |
7c236c43 SH |
1265 | enable_wanted = true; |
1266 | break; | |
1267 | case HWTSTAMP_FILTER_PTP_V2_EVENT: | |
1268 | case HWTSTAMP_FILTER_PTP_V2_SYNC: | |
1269 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | |
1270 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: | |
1271 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | |
1272 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | |
1273 | /* Non-IP + IPv6 timestamping not supported */ | |
1274 | return -ERANGE; | |
1275 | break; | |
1276 | default: | |
1277 | return -ERANGE; | |
1278 | } | |
1279 | ||
1280 | if (init->tx_type != HWTSTAMP_TX_OFF) | |
1281 | enable_wanted = true; | |
1282 | ||
c939a316 LE |
1283 | /* Old versions of the firmware do not support the improved |
1284 | * UUID filtering option (SF bug 33070). If the firmware does | |
1285 | * not accept the enhanced mode, fall back to the standard PTP | |
1286 | * v2 UUID filtering. | |
1287 | */ | |
7c236c43 | 1288 | rc = efx_ptp_change_mode(efx, enable_wanted, new_mode); |
c939a316 LE |
1289 | if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED)) |
1290 | rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2); | |
7c236c43 SH |
1291 | if (rc != 0) |
1292 | return rc; | |
1293 | ||
1294 | efx->ptp_data->config = *init; | |
1295 | ||
1296 | return 0; | |
1297 | } | |
1298 | ||
62ebac92 | 1299 | void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) |
7c236c43 | 1300 | { |
7c236c43 SH |
1301 | struct efx_ptp_data *ptp = efx->ptp_data; |
1302 | ||
1303 | if (!ptp) | |
62ebac92 | 1304 | return; |
7c236c43 | 1305 | |
62ebac92 BH |
1306 | ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | |
1307 | SOF_TIMESTAMPING_RX_HARDWARE | | |
1308 | SOF_TIMESTAMPING_RAW_HARDWARE); | |
7c236c43 SH |
1309 | ts_info->phc_index = ptp_clock_index(ptp->phc_clock); |
1310 | ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON; | |
1311 | ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE | | |
1312 | 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | | |
1313 | 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | | |
1314 | 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | | |
1315 | 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | | |
1316 | 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | | |
1317 | 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); | |
7c236c43 SH |
1318 | } |
1319 | ||
433dc9b3 | 1320 | int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr) |
7c236c43 SH |
1321 | { |
1322 | struct hwtstamp_config config; | |
1323 | int rc; | |
1324 | ||
1325 | /* Not a PTP enabled port */ | |
1326 | if (!efx->ptp_data) | |
1327 | return -EOPNOTSUPP; | |
1328 | ||
1329 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) | |
1330 | return -EFAULT; | |
1331 | ||
1332 | rc = efx_ptp_ts_init(efx, &config); | |
1333 | if (rc != 0) | |
1334 | return rc; | |
1335 | ||
1336 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) | |
1337 | ? -EFAULT : 0; | |
1338 | } | |
1339 | ||
433dc9b3 BH |
1340 | int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr) |
1341 | { | |
1342 | if (!efx->ptp_data) | |
1343 | return -EOPNOTSUPP; | |
1344 | ||
1345 | return copy_to_user(ifr->ifr_data, &efx->ptp_data->config, | |
1346 | sizeof(efx->ptp_data->config)) ? -EFAULT : 0; | |
1347 | } | |
1348 | ||
/* An MCDI PTP event arrived with the wrong number of fragments.
 * Log the mismatch and ask the worker to reset PTP from scratch.
 */
static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
{
	struct efx_ptp_data *ptp = efx->ptp_data;

	netif_err(efx, hw, efx->net_dev,
		  "PTP unexpected event length: got %d expected %d\n",
		  ptp->evt_frag_idx, expected_frag_len);
	/* The worker performs the stop/start cycle (see efx_ptp_worker) */
	ptp->reset_required = true;
	queue_work(ptp->workwq, &ptp->work);
}
1359 | ||
/* Process a completed receive event.  Put it on the event queue and
 * start worker thread.  This is required because event and their
 * corresponding packets may come in either order.
 */
static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
	struct efx_ptp_event_rx *evt = NULL;

	/* An RX timestamp event is always delivered as three fragments */
	if (ptp->evt_frag_idx != 3) {
		ptp_event_failure(efx, 3);
		return;
	}

	spin_lock_bh(&ptp->evt_lock);
	if (!list_empty(&ptp->evt_free_list)) {
		evt = list_first_entry(&ptp->evt_free_list,
				       struct efx_ptp_event_rx, link);
		list_del(&evt->link);

		/* Reassemble the packet's UUID/sequence match words and the
		 * hardware timestamp from the three event fragments
		 */
		evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA);
		evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2],
					     MCDI_EVENT_SRC) |
			     (EFX_QWORD_FIELD(ptp->evt_frags[1],
					      MCDI_EVENT_SRC) << 8) |
			     (EFX_QWORD_FIELD(ptp->evt_frags[0],
					      MCDI_EVENT_SRC) << 16));
		evt->hwtimestamp = ktime_set(
			EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
			EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA));
		evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
		list_add_tail(&evt->link, &ptp->evt_list);

		queue_work(ptp->workwq, &ptp->work);
	} else if (!ptp->evt_overflow) {
		/* Log a warning message and set the event overflow flag.
		 * The message won't be logged again until the event queue
		 * becomes empty.
		 */
		netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
		ptp->evt_overflow = true;
	}
	spin_unlock_bh(&ptp->evt_lock);
}
1403 | ||
1404 | static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp) | |
1405 | { | |
1406 | int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA); | |
1407 | if (ptp->evt_frag_idx != 1) { | |
1408 | ptp_event_failure(efx, 1); | |
1409 | return; | |
1410 | } | |
1411 | ||
1412 | netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code); | |
1413 | } | |
1414 | ||
/* MCDI PPS event: kick the PPS worker, but only if a PHC consumer has
 * requested PPS events via efx_phc_enable().
 */
static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
{
	if (ptp->nic_ts_enabled)
		queue_work(ptp->pps_workwq, &ptp->pps_work);
}
1420 | ||
1421 | void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) | |
1422 | { | |
1423 | struct efx_ptp_data *ptp = efx->ptp_data; | |
1424 | int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); | |
1425 | ||
1426 | if (!ptp->enabled) | |
1427 | return; | |
1428 | ||
1429 | if (ptp->evt_frag_idx == 0) { | |
1430 | ptp->evt_code = code; | |
1431 | } else if (ptp->evt_code != code) { | |
1432 | netif_err(efx, hw, efx->net_dev, | |
1433 | "PTP out of sequence event %d\n", code); | |
1434 | ptp->evt_frag_idx = 0; | |
1435 | } | |
1436 | ||
1437 | ptp->evt_frags[ptp->evt_frag_idx++] = *ev; | |
1438 | if (!MCDI_EVENT_FIELD(*ev, CONT)) { | |
1439 | /* Process resulting event */ | |
1440 | switch (code) { | |
1441 | case MCDI_EVENT_CODE_PTP_RX: | |
1442 | ptp_event_rx(efx, ptp); | |
1443 | break; | |
1444 | case MCDI_EVENT_CODE_PTP_FAULT: | |
1445 | ptp_event_fault(efx, ptp); | |
1446 | break; | |
1447 | case MCDI_EVENT_CODE_PTP_PPS: | |
1448 | ptp_event_pps(efx, ptp); | |
1449 | break; | |
1450 | default: | |
1451 | netif_err(efx, hw, efx->net_dev, | |
1452 | "PTP unknown event %d\n", code); | |
1453 | break; | |
1454 | } | |
1455 | ptp->evt_frag_idx = 0; | |
1456 | } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) { | |
1457 | netif_err(efx, hw, efx->net_dev, | |
1458 | "PTP too many event fragments\n"); | |
1459 | ptp->evt_frag_idx = 0; | |
1460 | } | |
1461 | } | |
1462 | ||
1463 | static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) | |
1464 | { | |
1465 | struct efx_ptp_data *ptp_data = container_of(ptp, | |
1466 | struct efx_ptp_data, | |
1467 | phc_clock_info); | |
ac36baf8 | 1468 | struct efx_nic *efx = ptp_data->efx; |
59cfc479 | 1469 | MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN); |
7c236c43 SH |
1470 | s64 adjustment_ns; |
1471 | int rc; | |
1472 | ||
1473 | if (delta > MAX_PPB) | |
1474 | delta = MAX_PPB; | |
1475 | else if (delta < -MAX_PPB) | |
1476 | delta = -MAX_PPB; | |
1477 | ||
1478 | /* Convert ppb to fixed point ns. */ | |
1479 | adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >> | |
1480 | (PPB_EXTRA_BITS + MAX_PPB_BITS)); | |
1481 | ||
1482 | MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); | |
c1d828bd | 1483 | MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0); |
338f74df | 1484 | MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns); |
7c236c43 SH |
1485 | MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); |
1486 | MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); | |
1487 | rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), | |
1488 | NULL, 0, NULL); | |
1489 | if (rc != 0) | |
1490 | return rc; | |
1491 | ||
cd6fe65e | 1492 | ptp_data->current_adjfreq = adjustment_ns; |
7c236c43 SH |
1493 | return 0; |
1494 | } | |
1495 | ||
1496 | static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) | |
1497 | { | |
1498 | struct efx_ptp_data *ptp_data = container_of(ptp, | |
1499 | struct efx_ptp_data, | |
1500 | phc_clock_info); | |
ac36baf8 | 1501 | struct efx_nic *efx = ptp_data->efx; |
7c236c43 | 1502 | struct timespec delta_ts = ns_to_timespec(delta); |
59cfc479 | 1503 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN); |
7c236c43 SH |
1504 | |
1505 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); | |
c1d828bd | 1506 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); |
cd6fe65e | 1507 | MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); |
7c236c43 SH |
1508 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); |
1509 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); | |
1510 | return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), | |
1511 | NULL, 0, NULL); | |
1512 | } | |
1513 | ||
/* PHC gettime: read the current NIC time over MCDI. */
static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	struct efx_ptp_data *ptp_data = container_of(ptp,
						     struct efx_ptp_data,
						     phc_clock_info);
	struct efx_nic *efx = ptp_data->efx;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc != 0)
		return rc;

	ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS);
	ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS);
	return 0;
}
1536 | ||
1537 | static int efx_phc_settime(struct ptp_clock_info *ptp, | |
1538 | const struct timespec *e_ts) | |
1539 | { | |
1540 | /* Get the current NIC time, efx_phc_gettime. | |
1541 | * Subtract from the desired time to get the offset | |
1542 | * call efx_phc_adjtime with the offset | |
1543 | */ | |
1544 | int rc; | |
1545 | struct timespec time_now; | |
1546 | struct timespec delta; | |
1547 | ||
1548 | rc = efx_phc_gettime(ptp, &time_now); | |
1549 | if (rc != 0) | |
1550 | return rc; | |
1551 | ||
1552 | delta = timespec_sub(*e_ts, time_now); | |
1553 | ||
56567c6f | 1554 | rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta)); |
7c236c43 SH |
1555 | if (rc != 0) |
1556 | return rc; | |
1557 | ||
1558 | return 0; | |
1559 | } | |
1560 | ||
1561 | static int efx_phc_enable(struct ptp_clock_info *ptp, | |
1562 | struct ptp_clock_request *request, | |
1563 | int enable) | |
1564 | { | |
1565 | struct efx_ptp_data *ptp_data = container_of(ptp, | |
1566 | struct efx_ptp_data, | |
1567 | phc_clock_info); | |
1568 | if (request->type != PTP_CLK_REQ_PPS) | |
1569 | return -EOPNOTSUPP; | |
1570 | ||
1571 | ptp_data->nic_ts_enabled = !!enable; | |
1572 | return 0; | |
1573 | } | |
1574 | ||
/* Channel-type operations for the dedicated PTP channel. */
static const struct efx_channel_type efx_ptp_channel_type = {
	.handle_no_channel	= efx_ptp_handle_no_channel,
	.pre_probe		= efx_ptp_probe_channel,
	.post_remove		= efx_ptp_remove_channel,
	.get_name		= efx_ptp_get_channel_name,
	/* no copy operation; there is no need to reallocate this channel */
	.receive_skb		= efx_ptp_rx,
	.keep_eventq		= false,
};
1584 | ||
ac36baf8 | 1585 | void efx_ptp_defer_probe_with_channel(struct efx_nic *efx) |
7c236c43 SH |
1586 | { |
1587 | /* Check whether PTP is implemented on this NIC. The DISABLE | |
1588 | * operation will succeed if and only if it is implemented. | |
1589 | */ | |
1590 | if (efx_ptp_disable(efx) == 0) | |
1591 | efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = | |
1592 | &efx_ptp_channel_type; | |
1593 | } | |
2ea4dc28 AR |
1594 | |
1595 | void efx_ptp_start_datapath(struct efx_nic *efx) | |
1596 | { | |
1597 | if (efx_ptp_restart(efx)) | |
1598 | netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); | |
1599 | } | |
1600 | ||
/* Datapath stop hook: quiesce PTP (MC disable, filters, queued packets). */
void efx_ptp_stop_datapath(struct efx_nic *efx)
{
	efx_ptp_stop(efx);
}