Commit | Line | Data |
---|---|---|
7c236c43 SH |
1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | |
3 | * Copyright 2011 Solarflare Communications Inc. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License version 2 as published | |
7 | * by the Free Software Foundation, incorporated herein by reference. | |
8 | */ | |
9 | ||
10 | /* Theory of operation: | |
11 | * | |
12 | * PTP support is assisted by firmware running on the MC, which provides | |
13 | * the hardware timestamping capabilities. Both transmitted and received | |
14 | * PTP event packets are queued onto internal queues for subsequent processing; | |
15 | * this is because the MC operations are relatively long and would block | |
16 | * NAPI/interrupt operation. |
17 | * | |
18 | * Receive event processing: | |
19 | * The event contains the packet's UUID and sequence number, together | |
20 | * with the hardware timestamp. The PTP receive packet queue is searched | |
21 | * for this UUID/sequence number and, if found, put on a pending queue. | |
22 | * Packets not matching are delivered without timestamps (MCDI events will | |
23 | * always arrive after the actual packet). | |
24 | * It is important for the operation of the PTP protocol that the ordering | |
25 | * of packets between the event and general port is maintained. | |
26 | * | |
27 | * Work queue processing: | |
28 | * If work waiting, synchronise host/hardware time | |
29 | * | |
30 | * Transmit: send packet through MC, which returns the transmission time | |
31 | * that is converted to an appropriate timestamp. | |
32 | * | |
33 | * Receive: the packet's reception time is converted to an appropriate | |
34 | * timestamp. | |
35 | */ | |
36 | #include <linux/ip.h> | |
37 | #include <linux/udp.h> | |
38 | #include <linux/time.h> | |
39 | #include <linux/ktime.h> | |
40 | #include <linux/module.h> | |
41 | #include <linux/net_tstamp.h> | |
42 | #include <linux/pps_kernel.h> | |
43 | #include <linux/ptp_clock_kernel.h> | |
44 | #include "net_driver.h" | |
45 | #include "efx.h" | |
46 | #include "mcdi.h" | |
47 | #include "mcdi_pcol.h" | |
48 | #include "io.h" | |
8b8a95a1 | 49 | #include "farch_regs.h" |
7c236c43 SH |
50 | #include "nic.h" |
51 | ||
52 | /* Maximum number of events expected to make up a PTP event */ | |
53 | #define MAX_EVENT_FRAGS 3 | |
54 | ||
55 | /* Maximum delay, ms, to begin synchronisation */ | |
56 | #define MAX_SYNCHRONISE_WAIT_MS 2 | |
57 | ||
58 | /* How long, at most, to spend synchronising */ | |
59 | #define SYNCHRONISE_PERIOD_NS 250000 | |
60 | ||
61 | /* How often to update the shared memory time */ | |
62 | #define SYNCHRONISATION_GRANULARITY_NS 200 | |
63 | ||
64 | /* Minimum permitted length of a (corrected) synchronisation time */ | |
65 | #define MIN_SYNCHRONISATION_NS 120 | |
66 | ||
67 | /* Maximum permitted length of a (corrected) synchronisation time */ | |
68 | #define MAX_SYNCHRONISATION_NS 1000 | |
69 | ||
70 | /* How many (MC) receive events that can be queued */ | |
71 | #define MAX_RECEIVE_EVENTS 8 | |
72 | ||
73 | /* Length of (modified) moving average. */ | |
74 | #define AVERAGE_LENGTH 16 | |
75 | ||
76 | /* How long an unmatched event or packet can be held */ | |
77 | #define PKT_EVENT_LIFETIME_MS 10 | |
78 | ||
79 | /* Offsets into PTP packet for identification. These offsets are from the | |
80 | * start of the IP header, not the MAC header. Note that neither PTP V1 nor | |
81 | * PTP V2 permit the use of IPV4 options. | |
82 | */ | |
83 | #define PTP_DPORT_OFFSET 22 | |
84 | ||
85 | #define PTP_V1_VERSION_LENGTH 2 | |
86 | #define PTP_V1_VERSION_OFFSET 28 | |
87 | ||
88 | #define PTP_V1_UUID_LENGTH 6 | |
89 | #define PTP_V1_UUID_OFFSET 50 | |
90 | ||
91 | #define PTP_V1_SEQUENCE_LENGTH 2 | |
92 | #define PTP_V1_SEQUENCE_OFFSET 58 | |
93 | ||
94 | /* The minimum length of a PTP V1 packet for offsets, etc. to be valid: | |
95 | * includes IP header. | |
96 | */ | |
97 | #define PTP_V1_MIN_LENGTH 64 | |
98 | ||
99 | #define PTP_V2_VERSION_LENGTH 1 | |
100 | #define PTP_V2_VERSION_OFFSET 29 | |
101 | ||
c939a316 LE |
102 | #define PTP_V2_UUID_LENGTH 8 |
103 | #define PTP_V2_UUID_OFFSET 48 | |
104 | ||
7c236c43 SH |
105 | /* Although PTP V2 UUIDs are comprised a ClockIdentity (8) and PortNumber (2), |
106 | * the MC only captures the last six bytes of the clock identity. These values | |
107 | * reflect those, not the ones used in the standard. The standard permits | |
108 | * mapping of V1 UUIDs to V2 UUIDs with these same values. | |
109 | */ | |
110 | #define PTP_V2_MC_UUID_LENGTH 6 | |
111 | #define PTP_V2_MC_UUID_OFFSET 50 | |
112 | ||
113 | #define PTP_V2_SEQUENCE_LENGTH 2 | |
114 | #define PTP_V2_SEQUENCE_OFFSET 58 | |
115 | ||
116 | /* The minimum length of a PTP V2 packet for offsets, etc. to be valid: | |
117 | * includes IP header. | |
118 | */ | |
119 | #define PTP_V2_MIN_LENGTH 63 | |
120 | ||
121 | #define PTP_MIN_LENGTH 63 | |
122 | ||
123 | #define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */ | |
124 | #define PTP_EVENT_PORT 319 | |
125 | #define PTP_GENERAL_PORT 320 | |
126 | ||
127 | /* Annoyingly the format of the version numbers are different between | |
128 | * versions 1 and 2 so it isn't possible to simply look for 1 or 2. | |
129 | */ | |
130 | #define PTP_VERSION_V1 1 | |
131 | ||
132 | #define PTP_VERSION_V2 2 | |
133 | #define PTP_VERSION_V2_MASK 0x0f | |
134 | ||
/* Lifecycle states of a PTP packet awaiting an MC timestamp event.
 * Stored in struct efx_ptp_match (the sk_buff cb area).
 */
enum ptp_packet_state {
	PTP_PACKET_STATE_UNMATCHED = 0,	/* No timestamp event seen yet */
	PTP_PACKET_STATE_MATCHED,	/* Timestamp found and filled in */
	PTP_PACKET_STATE_TIMED_OUT,	/* Gave up waiting for an event */
	PTP_PACKET_STATE_MATCH_UNWANTED	/* Packet needs no timestamp match */
};
141 | ||
142 | /* NIC synchronised with single word of time only comprising | |
143 | * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds. | |
144 | */ | |
145 | #define MC_NANOSECOND_BITS 30 | |
146 | #define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1) | |
147 | #define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1) | |
148 | ||
149 | /* Maximum parts-per-billion adjustment that is acceptable */ | |
150 | #define MAX_PPB 1000000 | |
151 | ||
152 | /* Number of bits required to hold the above */ | |
153 | #define MAX_PPB_BITS 20 | |
154 | ||
155 | /* Number of extra bits allowed when calculating fractional ns. | |
156 | * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should | |
157 | * be less than 63. | |
158 | */ | |
159 | #define PPB_EXTRA_BITS 2 | |
160 | ||
161 | /* Precalculate scale word to avoid long long division at runtime */ | |
162 | #define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\ | |
163 | MAX_PPB_BITS)) / 1000000000LL) | |
164 | ||
165 | #define PTP_SYNC_ATTEMPTS 4 | |
166 | ||
/**
 * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
 * @words: UUID and (partial) sequence number
 * @expiry: Time after which the packet should be delivered irrespective of
 *          event arrival.
 * @state: The state of the packet - whether it is ready for processing or
 *         whether that is of no interest.
 */
struct efx_ptp_match {
	/* V1 UUID length rounded up to whole 32-bit words */
	u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
	unsigned long expiry;
	enum ptp_packet_state state;
};
180 | ||
/**
 * struct efx_ptp_event_rx - A PTP receive event (from MC)
 * @link: List node; on evt_list while pending, evt_free_list when unused
 * @seq0: First part of (PTP) UUID
 * @seq1: Second part of (PTP) UUID and sequence number
 * @hwtimestamp: Event timestamp
 * @expiry: Time after which an unmatched event is dropped
 */
struct efx_ptp_event_rx {
	struct list_head link;
	u32 seq0;
	u32 seq1;
	ktime_t hwtimestamp;
	unsigned long expiry;
};
194 | ||
/**
 * struct efx_ptp_timeset - Synchronisation between host and MC
 * @host_start: Host time immediately before hardware timestamp taken
 * @seconds: Hardware timestamp, seconds
 * @nanoseconds: Hardware timestamp, nanoseconds
 * @host_end: Host time immediately after hardware timestamp taken
 * @waitns: Number of nanoseconds between hardware timestamp being read and
 *          host end time being seen
 * @window: Difference of host_end and host_start
 */
struct efx_ptp_timeset {
	u32 host_start;
	u32 seconds;
	u32 nanoseconds;
	u32 host_end;
	u32 waitns;
	u32 window;	/* Derived: end - start, allowing for wrap */
};
214 | ||
/**
 * struct efx_ptp_data - Precision Time Protocol (PTP) state
 * @channel: The PTP channel
 * @rxq: Receive queue (awaiting timestamps)
 * @txq: Transmit queue
 * @evt_list: List of MC receive events awaiting packets
 * @evt_free_list: List of free events
 * @evt_lock: Lock for manipulating evt_list and evt_free_list
 * @rx_evts: Instantiated events (on evt_list and evt_free_list)
 * @workwq: Work queue for processing pending PTP operations
 * @work: Work task
 * @reset_required: A serious error has occurred and the PTP task needs to be
 *                  reset (disable, enable).
 * @rxfilter_event: Receive filter for the PTP event port (when operating)
 * @rxfilter_general: Receive filter for the PTP general port (when operating)
 * @rxfilter_installed: Whether the two receive filters are installed
 * @config: Current timestamp configuration
 * @enabled: PTP operation enabled
 * @mode: Mode in which PTP operating (PTP version)
 * @evt_frags: Partly assembled PTP events
 * @evt_frag_idx: Current fragment number
 * @evt_code: Last event code
 * @start: Address at which MC indicates ready for synchronisation
 * @host_time_pps: Host time at last PPS
 * @last_sync_ns: Last number of nanoseconds between readings when synchronising
 * @base_sync_ns: Number of nanoseconds for last synchronisation.
 * @base_sync_valid: Whether base_sync_ns is valid.
 * @current_adjfreq: Current ppb adjustment.
 * @phc_clock: Pointer to registered phc device
 * @phc_clock_info: Registration structure for phc device
 * @pps_work: pps work task for handling pps events
 * @pps_workwq: pps work queue
 * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
 * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
 *         allocations in main data path).
 * @timeset: Last set of synchronisation statistics.
 */
struct efx_ptp_data {
	struct efx_channel *channel;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct list_head evt_list;
	struct list_head evt_free_list;
	spinlock_t evt_lock;
	struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
	struct workqueue_struct *workwq;
	struct work_struct work;
	bool reset_required;
	u32 rxfilter_event;
	u32 rxfilter_general;
	bool rxfilter_installed;
	struct hwtstamp_config config;
	bool enabled;
	unsigned int mode;
	efx_qword_t evt_frags[MAX_EVENT_FRAGS];
	int evt_frag_idx;
	int evt_code;
	struct efx_buffer start;
	struct pps_event_time host_time_pps;
	unsigned last_sync_ns;
	unsigned base_sync_ns;
	bool base_sync_valid;
	s64 current_adjfreq;
	struct ptp_clock *phc_clock;
	struct ptp_clock_info phc_clock_info;
	struct work_struct pps_work;
	struct workqueue_struct *pps_workwq;
	bool nic_ts_enabled;
	MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
	struct efx_ptp_timeset
		timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
};
301 | ||
302 | static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta); | |
303 | static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta); | |
304 | static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts); | |
305 | static int efx_phc_settime(struct ptp_clock_info *ptp, | |
306 | const struct timespec *e_ts); | |
307 | static int efx_phc_enable(struct ptp_clock_info *ptp, | |
308 | struct ptp_clock_request *request, int on); | |
309 | ||
310 | /* Enable MCDI PTP support. */ | |
311 | static int efx_ptp_enable(struct efx_nic *efx) | |
312 | { | |
59cfc479 | 313 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN); |
7c236c43 SH |
314 | |
315 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); | |
316 | MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE, | |
317 | efx->ptp_data->channel->channel); | |
318 | MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode); | |
319 | ||
320 | return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), | |
321 | NULL, 0, NULL); | |
322 | } | |
323 | ||
324 | /* Disable MCDI PTP support. | |
325 | * | |
326 | * Note that this function should never rely on the presence of ptp_data - | |
327 | * may be called before that exists. | |
328 | */ | |
329 | static int efx_ptp_disable(struct efx_nic *efx) | |
330 | { | |
59cfc479 | 331 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN); |
7c236c43 SH |
332 | |
333 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); | |
334 | return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), | |
335 | NULL, 0, NULL); | |
336 | } | |
337 | ||
338 | static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q) | |
339 | { | |
340 | struct sk_buff *skb; | |
341 | ||
342 | while ((skb = skb_dequeue(q))) { | |
343 | local_bh_disable(); | |
344 | netif_receive_skb(skb); | |
345 | local_bh_enable(); | |
346 | } | |
347 | } | |
348 | ||
349 | static void efx_ptp_handle_no_channel(struct efx_nic *efx) | |
350 | { | |
351 | netif_err(efx, drv, efx->net_dev, | |
352 | "ERROR: PTP requires MSI-X and 1 additional interrupt" | |
353 | "vector. PTP disabled\n"); | |
354 | } | |
355 | ||
356 | /* Repeatedly send the host time to the MC which will capture the hardware | |
357 | * time. | |
358 | */ | |
359 | static void efx_ptp_send_times(struct efx_nic *efx, | |
360 | struct pps_event_time *last_time) | |
361 | { | |
362 | struct pps_event_time now; | |
363 | struct timespec limit; | |
364 | struct efx_ptp_data *ptp = efx->ptp_data; | |
365 | struct timespec start; | |
366 | int *mc_running = ptp->start.addr; | |
367 | ||
368 | pps_get_ts(&now); | |
369 | start = now.ts_real; | |
370 | limit = now.ts_real; | |
371 | timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS); | |
372 | ||
373 | /* Write host time for specified period or until MC is done */ | |
374 | while ((timespec_compare(&now.ts_real, &limit) < 0) && | |
375 | ACCESS_ONCE(*mc_running)) { | |
376 | struct timespec update_time; | |
377 | unsigned int host_time; | |
378 | ||
379 | /* Don't update continuously to avoid saturating the PCIe bus */ | |
380 | update_time = now.ts_real; | |
381 | timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); | |
382 | do { | |
383 | pps_get_ts(&now); | |
384 | } while ((timespec_compare(&now.ts_real, &update_time) < 0) && | |
385 | ACCESS_ONCE(*mc_running)); | |
386 | ||
387 | /* Synchronise NIC with single word of time only */ | |
388 | host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | | |
389 | now.ts_real.tv_nsec); | |
390 | /* Update host time in NIC memory */ | |
391 | _efx_writed(efx, cpu_to_le32(host_time), | |
392 | FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST); | |
393 | } | |
394 | *last_time = now; | |
395 | } | |
396 | ||
397 | /* Read a timeset from the MC's results and partial process. */ | |
c5bb0e98 BH |
398 | static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data), |
399 | struct efx_ptp_timeset *timeset) | |
7c236c43 SH |
400 | { |
401 | unsigned start_ns, end_ns; | |
402 | ||
403 | timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART); | |
404 | timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS); | |
405 | timeset->nanoseconds = MCDI_DWORD(data, | |
406 | PTP_OUT_SYNCHRONIZE_NANOSECONDS); | |
407 | timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND), | |
408 | timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS); | |
409 | ||
410 | /* Ignore seconds */ | |
411 | start_ns = timeset->host_start & MC_NANOSECOND_MASK; | |
412 | end_ns = timeset->host_end & MC_NANOSECOND_MASK; | |
413 | /* Allow for rollover */ | |
414 | if (end_ns < start_ns) | |
415 | end_ns += NSEC_PER_SEC; | |
416 | /* Determine duration of operation */ | |
417 | timeset->window = end_ns - start_ns; | |
418 | } | |
419 | ||
/* Process times received from MC.
 *
 * Extract times from returned results, and establish the minimum value
 * seen. The minimum value represents the "best" possible time and events
 * too much greater than this are rejected - the machine is, perhaps, too
 * busy. A number of readings are taken so that, hopefully, at least one good
 * synchronisation will be seen in the results.
 *
 * Returns 0 on success, -EAGAIN if no usable synchronisation was found.
 */
static int
efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
		      size_t response_length,
		      const struct pps_event_time *last_time)
{
	unsigned number_readings =
		MCDI_VAR_ARRAY_LEN(response_length,
				   PTP_OUT_SYNCHRONIZE_TIMESET);
	unsigned i;
	unsigned total;
	unsigned ngood = 0;
	unsigned last_good = 0;
	struct efx_ptp_data *ptp = efx->ptp_data;
	u32 last_sec;
	u32 start_sec;
	struct timespec delta;

	if (number_readings == 0)
		return -EAGAIN;

	/* Read each timeset and derive its capture window duration. */
	for (i = 0; i < number_readings; i++) {
		efx_ptp_read_timeset(
			MCDI_ARRAY_STRUCT_PTR(synch_buf,
					      PTP_OUT_SYNCHRONIZE_TIMESET, i),
			&ptp->timeset[i]);
	}

	/* Find the last good host-MC synchronization result. The MC times
	 * when it finishes reading the host time so the corrected window time
	 * should be fairly constant for a given platform.
	 */
	total = 0;
	for (i = 0; i < number_readings; i++)
		if (ptp->timeset[i].window > ptp->timeset[i].waitns) {
			unsigned win;

			win = ptp->timeset[i].window - ptp->timeset[i].waitns;
			if (win >= MIN_SYNCHRONISATION_NS &&
			    win < MAX_SYNCHRONISATION_NS) {
				/* NOTE(review): the uncorrected window is
				 * accumulated although the acceptance test
				 * used the corrected value - confirm that
				 * this is intended.
				 */
				total += ptp->timeset[i].window;
				ngood++;
				last_good = i;
			}
		}

	if (ngood == 0) {
		netif_warn(efx, drv, efx->net_dev,
			   "PTP no suitable synchronisations %dns\n",
			   ptp->base_sync_ns);
		return -EAGAIN;
	}

	/* Average window of the accepted synchronisations; track the
	 * smallest average seen so far as the baseline.
	 */
	ptp->last_sync_ns = DIV_ROUND_UP(total, ngood);
	if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) {
		ptp->base_sync_valid = true;
		ptp->base_sync_ns = ptp->last_sync_ns;
	}

	/* Calculate delay from actual PPS to last_time */
	delta.tv_nsec =
		ptp->timeset[last_good].nanoseconds +
		last_time->ts_real.tv_nsec -
		(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);

	/* It is possible that the seconds rolled over between taking
	 * the start reading and the last value written by the host. The
	 * timescales are such that a gap of more than one second is never
	 * expected.
	 */
	start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
	last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
	if (start_sec != last_sec) {
		if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
			netif_warn(efx, hw, efx->net_dev,
				   "PTP bad synchronisation seconds\n");
			return -EAGAIN;
		} else {
			delta.tv_sec = 1;
		}
	} else {
		delta.tv_sec = 0;
	}

	ptp->host_time_pps = *last_time;
	pps_sub_ts(&ptp->host_time_pps, delta);

	return 0;
}
520 | ||
/* Synchronize times between the host and the MC.
 *
 * Issues an asynchronous MCDI SYNCHRONIZE, waits (briefly) for the MC to
 * signal readiness via the shared 'start' word, streams host times to the
 * MC while it captures hardware times, then collects and processes the
 * resulting timesets.
 */
static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
	size_t response_length;
	int rc;
	unsigned long timeout;
	struct pps_event_time last_time = {};
	unsigned int loops = 0;	/* poll count; currently informational only */
	int *start = ptp->start.addr;

	MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
	MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
		       num_readings);
	MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
		       ptp->start.dma_addr);

	/* Clear flag that signals MC ready */
	ACCESS_ONCE(*start) = 0;
	rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
				MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
	EFX_BUG_ON_PARANOID(rc);

	/* Wait for start from MCDI (or timeout) */
	timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
	while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
		udelay(20);	/* Usually start MCDI execution quickly */
		loops++;
	}

	if (ACCESS_ONCE(*start))
		efx_ptp_send_times(efx, &last_time);

	/* Collect results (must always finish the RPC started above) */
	rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP,
				 MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
				 synch_buf, sizeof(synch_buf),
				 &response_length);
	if (rc == 0)
		rc = efx_ptp_process_times(efx, synch_buf, response_length,
					   &last_time);

	return rc;
}
566 | ||
/* Transmit a PTP packet, via the MCDI interface, to the wire.
 *
 * The packet must be linear and have a software checksum before it can be
 * copied into the preallocated MCDI transmit buffer.  The MC returns the
 * transmission time, which is reported to the stack via skb_tstamp_tx().
 * The skb is consumed (freed) on both success and failure.
 */
static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
{
	struct efx_ptp_data *ptp_data = efx->ptp_data;
	struct skb_shared_hwtstamps timestamps;
	int rc = -EIO;
	MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
	size_t len;

	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
	/* Ensure packet data is contiguous before copying */
	if (skb_shinfo(skb)->nr_frags != 0) {
		rc = skb_linearize(skb);
		if (rc != 0)
			goto fail;
	}

	/* The MC cannot offload checksums; complete them in software */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		rc = skb_checksum_help(skb);
		if (rc != 0)
			goto fail;
	}
	skb_copy_from_linear_data(skb,
				  MCDI_PTR(ptp_data->txbuf,
					   PTP_IN_TRANSMIT_PACKET),
				  skb->len);
	rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
			  ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
			  txtime, sizeof(txtime), &len);
	if (rc != 0)
		goto fail;

	/* Report the hardware transmit time back to the stack */
	memset(&timestamps, 0, sizeof(timestamps));
	timestamps.hwtstamp = ktime_set(
		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS),
		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS));

	skb_tstamp_tx(skb, &timestamps);

	rc = 0;

fail:
	dev_kfree_skb(skb);

	return rc;
}
613 | ||
614 | static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) | |
615 | { | |
616 | struct efx_ptp_data *ptp = efx->ptp_data; | |
617 | struct list_head *cursor; | |
618 | struct list_head *next; | |
619 | ||
620 | /* Drop time-expired events */ | |
621 | spin_lock_bh(&ptp->evt_lock); | |
622 | if (!list_empty(&ptp->evt_list)) { | |
623 | list_for_each_safe(cursor, next, &ptp->evt_list) { | |
624 | struct efx_ptp_event_rx *evt; | |
625 | ||
626 | evt = list_entry(cursor, struct efx_ptp_event_rx, | |
627 | link); | |
628 | if (time_after(jiffies, evt->expiry)) { | |
9545f4e2 | 629 | list_move(&evt->link, &ptp->evt_free_list); |
7c236c43 SH |
630 | netif_warn(efx, hw, efx->net_dev, |
631 | "PTP rx event dropped\n"); | |
632 | } | |
633 | } | |
634 | } | |
635 | spin_unlock_bh(&ptp->evt_lock); | |
636 | } | |
637 | ||
638 | static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, | |
639 | struct sk_buff *skb) | |
640 | { | |
641 | struct efx_ptp_data *ptp = efx->ptp_data; | |
642 | bool evts_waiting; | |
643 | struct list_head *cursor; | |
644 | struct list_head *next; | |
645 | struct efx_ptp_match *match; | |
646 | enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED; | |
647 | ||
648 | spin_lock_bh(&ptp->evt_lock); | |
649 | evts_waiting = !list_empty(&ptp->evt_list); | |
650 | spin_unlock_bh(&ptp->evt_lock); | |
651 | ||
652 | if (!evts_waiting) | |
653 | return PTP_PACKET_STATE_UNMATCHED; | |
654 | ||
655 | match = (struct efx_ptp_match *)skb->cb; | |
656 | /* Look for a matching timestamp in the event queue */ | |
657 | spin_lock_bh(&ptp->evt_lock); | |
658 | list_for_each_safe(cursor, next, &ptp->evt_list) { | |
659 | struct efx_ptp_event_rx *evt; | |
660 | ||
661 | evt = list_entry(cursor, struct efx_ptp_event_rx, link); | |
662 | if ((evt->seq0 == match->words[0]) && | |
663 | (evt->seq1 == match->words[1])) { | |
664 | struct skb_shared_hwtstamps *timestamps; | |
665 | ||
666 | /* Match - add in hardware timestamp */ | |
667 | timestamps = skb_hwtstamps(skb); | |
668 | timestamps->hwtstamp = evt->hwtimestamp; | |
669 | ||
670 | match->state = PTP_PACKET_STATE_MATCHED; | |
671 | rc = PTP_PACKET_STATE_MATCHED; | |
9545f4e2 | 672 | list_move(&evt->link, &ptp->evt_free_list); |
7c236c43 SH |
673 | break; |
674 | } | |
675 | } | |
676 | spin_unlock_bh(&ptp->evt_lock); | |
677 | ||
678 | return rc; | |
679 | } | |
680 | ||
/* Process any queued receive events and corresponding packets
 *
 * q is returned with all the packets that are ready for delivery.
 * true is returned if at least one of those packets requires
 * synchronisation.
 */
static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	bool rc = false;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ptp->rxq))) {
		struct efx_ptp_match *match;

		match = (struct efx_ptp_match *)skb->cb;
		if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
			/* No timestamp needed; deliver as-is */
			__skb_queue_tail(q, skb);
		} else if (efx_ptp_match_rx(efx, skb) ==
			   PTP_PACKET_STATE_MATCHED) {
			/* Timestamp found; packet needs synchronisation */
			rc = true;
			__skb_queue_tail(q, skb);
		} else if (time_after(jiffies, match->expiry)) {
			/* Waited too long; deliver without a timestamp */
			match->state = PTP_PACKET_STATE_TIMED_OUT;
			netif_warn(efx, rx_err, efx->net_dev,
				   "PTP packet - no timestamp seen\n");
			__skb_queue_tail(q, skb);
		} else {
			/* Replace unprocessed entry and stop - preserves
			 * packet ordering for later retry.
			 */
			skb_queue_head(&ptp->rxq, skb);
			break;
		}
	}

	return rc;
}
717 | ||
/* Complete processing of a received packet by handing it to the stack.
 * Bottom halves are disabled because this runs from workqueue (process)
 * context, where netif_receive_skb() must not be called directly.
 */
static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
{
	local_bh_disable();
	netif_receive_skb(skb);
	local_bh_enable();
}
725 | ||
/* Start PTP operation: install receive filters for both PTP UDP ports and
 * enable PTP on the MC.  On any failure, previously installed filters are
 * removed (goto-based unwind) and the error is returned.
 */
static int efx_ptp_start(struct efx_nic *efx)
{
	struct efx_ptp_data *ptp = efx->ptp_data;
	struct efx_filter_spec rxfilter;
	int rc;

	ptp->reset_required = false;

	/* Must filter on both event and general ports to ensure
	 * that there is no packet re-ordering.
	 */
	efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
			   efx_rx_queue_index(
				   efx_channel_get_rx_queue(ptp->channel)));
	rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
				       htonl(PTP_ADDRESS),
				       htons(PTP_EVENT_PORT));
	if (rc != 0)
		return rc;

	rc = efx_filter_insert_filter(efx, &rxfilter, true);
	if (rc < 0)
		return rc;
	/* Non-negative return is the installed filter ID */
	ptp->rxfilter_event = rc;

	efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
			   efx_rx_queue_index(
				   efx_channel_get_rx_queue(ptp->channel)));
	rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
				       htonl(PTP_ADDRESS),
				       htons(PTP_GENERAL_PORT));
	if (rc != 0)
		goto fail;

	rc = efx_filter_insert_filter(efx, &rxfilter, true);
	if (rc < 0)
		goto fail;
	ptp->rxfilter_general = rc;

	rc = efx_ptp_enable(efx);
	if (rc != 0)
		goto fail2;

	ptp->evt_frag_idx = 0;
	ptp->current_adjfreq = 0;
	ptp->rxfilter_installed = true;

	return 0;

fail2:
	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
				  ptp->rxfilter_general);
fail:
	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
				  ptp->rxfilter_event);

	return rc;
}
784 | ||
785 | static int efx_ptp_stop(struct efx_nic *efx) | |
786 | { | |
787 | struct efx_ptp_data *ptp = efx->ptp_data; | |
788 | int rc = efx_ptp_disable(efx); | |
789 | struct list_head *cursor; | |
790 | struct list_head *next; | |
791 | ||
792 | if (ptp->rxfilter_installed) { | |
793 | efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, | |
794 | ptp->rxfilter_general); | |
795 | efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, | |
796 | ptp->rxfilter_event); | |
797 | ptp->rxfilter_installed = false; | |
798 | } | |
799 | ||
800 | /* Make sure RX packets are really delivered */ | |
801 | efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq); | |
802 | skb_queue_purge(&efx->ptp_data->txq); | |
803 | ||
804 | /* Drop any pending receive events */ | |
805 | spin_lock_bh(&efx->ptp_data->evt_lock); | |
806 | list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { | |
9545f4e2 | 807 | list_move(cursor, &efx->ptp_data->evt_free_list); |
7c236c43 SH |
808 | } |
809 | spin_unlock_bh(&efx->ptp_data->evt_lock); | |
810 | ||
811 | return rc; | |
812 | } | |
813 | ||
814 | static void efx_ptp_pps_worker(struct work_struct *work) | |
815 | { | |
816 | struct efx_ptp_data *ptp = | |
817 | container_of(work, struct efx_ptp_data, pps_work); | |
818 | struct efx_nic *efx = ptp->channel->efx; | |
819 | struct ptp_clock_event ptp_evt; | |
820 | ||
821 | if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS)) | |
822 | return; | |
823 | ||
824 | ptp_evt.type = PTP_CLOCK_PPSUSR; | |
825 | ptp_evt.pps_times = ptp->host_time_pps; | |
826 | ptp_clock_event(ptp->phc_clock, &ptp_evt); | |
827 | } | |
828 | ||
829 | /* Process any pending transmissions and timestamp any received packets. | |
830 | */ | |
831 | static void efx_ptp_worker(struct work_struct *work) | |
832 | { | |
833 | struct efx_ptp_data *ptp_data = | |
834 | container_of(work, struct efx_ptp_data, work); | |
835 | struct efx_nic *efx = ptp_data->channel->efx; | |
836 | struct sk_buff *skb; | |
837 | struct sk_buff_head tempq; | |
838 | ||
839 | if (ptp_data->reset_required) { | |
840 | efx_ptp_stop(efx); | |
841 | efx_ptp_start(efx); | |
842 | return; | |
843 | } | |
844 | ||
845 | efx_ptp_drop_time_expired_events(efx); | |
846 | ||
847 | __skb_queue_head_init(&tempq); | |
848 | if (efx_ptp_process_events(efx, &tempq) || | |
849 | !skb_queue_empty(&ptp_data->txq)) { | |
850 | ||
851 | while ((skb = skb_dequeue(&ptp_data->txq))) | |
852 | efx_ptp_xmit_skb(efx, skb); | |
853 | } | |
854 | ||
855 | while ((skb = __skb_dequeue(&tempq))) | |
856 | efx_ptp_process_rx(efx, skb); | |
857 | } | |
858 | ||
/* Initialise PTP channel and state.
 *
 * Setting core_index to zero causes the queue to be initialised and doesn't
 * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
 *
 * Allocates the per-NIC PTP state, the MC time-sync buffer, the work
 * queues and the PHC clock.  On any failure the resources acquired so
 * far are released in reverse order via the fail1..fail4 labels and a
 * negative errno is returned.
 */
static int efx_ptp_probe_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_ptp_data *ptp;
	int rc = 0;
	unsigned int pos;

	channel->irq_moderation = 0;
	channel->rx_queue.core_index = 0;

	ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
	efx->ptp_data = ptp;
	if (!efx->ptp_data)
		return -ENOMEM;

	/* Buffer shared with the MC; used by the synchronisation protocol */
	rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
	if (rc != 0)
		goto fail1;

	ptp->channel = channel;
	skb_queue_head_init(&ptp->rxq);
	skb_queue_head_init(&ptp->txq);
	ptp->workwq = create_singlethread_workqueue("sfc_ptp");
	if (!ptp->workwq) {
		rc = -ENOMEM;
		goto fail2;
	}

	INIT_WORK(&ptp->work, efx_ptp_worker);
	ptp->config.flags = 0;
	ptp->config.tx_type = HWTSTAMP_TX_OFF;
	ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
	INIT_LIST_HEAD(&ptp->evt_list);
	INIT_LIST_HEAD(&ptp->evt_free_list);
	spin_lock_init(&ptp->evt_lock);
	/* All receive-event slots start out on the free list */
	for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
		list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);

	/* Register a PHC clock named after the port's permanent MAC */
	ptp->phc_clock_info.owner = THIS_MODULE;
	snprintf(ptp->phc_clock_info.name,
		 sizeof(ptp->phc_clock_info.name),
		 "%pm", efx->net_dev->perm_addr);
	ptp->phc_clock_info.max_adj = MAX_PPB;
	ptp->phc_clock_info.n_alarm = 0;
	ptp->phc_clock_info.n_ext_ts = 0;
	ptp->phc_clock_info.n_per_out = 0;
	ptp->phc_clock_info.pps = 1;
	ptp->phc_clock_info.adjfreq = efx_phc_adjfreq;
	ptp->phc_clock_info.adjtime = efx_phc_adjtime;
	ptp->phc_clock_info.gettime = efx_phc_gettime;
	ptp->phc_clock_info.settime = efx_phc_settime;
	ptp->phc_clock_info.enable = efx_phc_enable;

	ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
					    &efx->pci_dev->dev);
	if (IS_ERR(ptp->phc_clock)) {
		rc = PTR_ERR(ptp->phc_clock);
		goto fail3;
	}

	/* Separate work queue so PPS reporting cannot be starved by the
	 * main PTP worker
	 */
	INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
	ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
	if (!ptp->pps_workwq) {
		rc = -ENOMEM;
		goto fail4;
	}
	ptp->nic_ts_enabled = false;

	return 0;
fail4:
	ptp_clock_unregister(efx->ptp_data->phc_clock);

fail3:
	destroy_workqueue(efx->ptp_data->workwq);

fail2:
	efx_nic_free_buffer(efx, &ptp->start);

fail1:
	kfree(efx->ptp_data);
	efx->ptp_data = NULL;

	return rc;
}
948 | ||
949 | static void efx_ptp_remove_channel(struct efx_channel *channel) | |
950 | { | |
951 | struct efx_nic *efx = channel->efx; | |
952 | ||
953 | if (!efx->ptp_data) | |
954 | return; | |
955 | ||
956 | (void)efx_ptp_disable(channel->efx); | |
957 | ||
958 | cancel_work_sync(&efx->ptp_data->work); | |
959 | cancel_work_sync(&efx->ptp_data->pps_work); | |
960 | ||
961 | skb_queue_purge(&efx->ptp_data->rxq); | |
962 | skb_queue_purge(&efx->ptp_data->txq); | |
963 | ||
964 | ptp_clock_unregister(efx->ptp_data->phc_clock); | |
965 | ||
966 | destroy_workqueue(efx->ptp_data->workwq); | |
967 | destroy_workqueue(efx->ptp_data->pps_workwq); | |
968 | ||
969 | efx_nic_free_buffer(efx, &efx->ptp_data->start); | |
970 | kfree(efx->ptp_data); | |
971 | } | |
972 | ||
/* Name the PTP channel "<nic-name>-ptp" for IRQ/debug reporting */
static void efx_ptp_get_channel_name(struct efx_channel *channel,
				     char *buf, size_t len)
{
	snprintf(buf, len, "%s-ptp", channel->efx->name);
}
978 | ||
979 | /* Determine whether this packet should be processed by the PTP module | |
980 | * or transmitted conventionally. | |
981 | */ | |
982 | bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) | |
983 | { | |
984 | return efx->ptp_data && | |
985 | efx->ptp_data->enabled && | |
986 | skb->len >= PTP_MIN_LENGTH && | |
987 | skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && | |
988 | likely(skb->protocol == htons(ETH_P_IP)) && | |
989 | ip_hdr(skb)->protocol == IPPROTO_UDP && | |
990 | udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); | |
991 | } | |
992 | ||
/* Receive a PTP packet. Packets are queued until the arrival of
 * the receive timestamp from the MC - this will probably occur after the
 * packet arrival because of the processing in the MC.
 *
 * Returns true if the skb was consumed (queued on ptp->rxq), false if
 * the caller should deliver it conventionally.
 */
static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
{
	struct efx_nic *efx = channel->efx;
	struct efx_ptp_data *ptp = efx->ptp_data;
	struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
	u8 *match_data_012, *match_data_345;
	unsigned int version;

	match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);

	/* Correct version? */
	if (ptp->mode == MC_CMD_PTP_MODE_V1) {
		if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
			return false;
		}
		version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
		if (version != PTP_VERSION_V1) {
			return false;
		}

		/* PTP V1 uses all six bytes of the UUID to match the packet
		 * to the timestamp
		 */
		match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
		match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
	} else {
		if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
			return false;
		}
		/* V2 carries the version in a single byte */
		version = skb->data[PTP_V2_VERSION_OFFSET];
		if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
			return false;
		}

		/* The original V2 implementation uses bytes 2-7 of
		 * the UUID to match the packet to the timestamp. This
		 * discards two of the bytes of the MAC address used
		 * to create the UUID (SF bug 33070). The PTP V2
		 * enhanced mode fixes this issue and uses bytes 0-2
		 * and byte 5-7 of the UUID.
		 */
		match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
		if (ptp->mode == MC_CMD_PTP_MODE_V2) {
			match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
		} else {
			match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
			BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
		}
	}

	/* Does this packet require timestamping? */
	if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
		struct skb_shared_hwtstamps *timestamps;

		match->state = PTP_PACKET_STATE_UNMATCHED;

		/* Clear all timestamps held: filled in later */
		timestamps = skb_hwtstamps(skb);
		memset(timestamps, 0, sizeof(*timestamps));

		/* We expect the sequence number to be in the same position in
		 * the packet for PTP V1 and V2
		 */
		BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
		BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);

		/* Extract UUID/Sequence information */
		match->words[0] = (match_data_012[0]         |
				   (match_data_012[1] << 8)  |
				   (match_data_012[2] << 16) |
				   (match_data_345[0] << 24));
		match->words[1] = (match_data_345[1]         |
				   (match_data_345[2] << 8)  |
				   (skb->data[PTP_V1_SEQUENCE_OFFSET +
					      PTP_V1_SEQUENCE_LENGTH - 1] <<
				    16));
	} else {
		/* General-port packet: no timestamp expected from the MC */
		match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
	}

	skb_queue_tail(&ptp->rxq, skb);
	queue_work(ptp->workwq, &ptp->work);

	return true;
}
1082 | ||
1083 | /* Transmit a PTP packet. This has to be transmitted by the MC | |
1084 | * itself, through an MCDI call. MCDI calls aren't permitted | |
1085 | * in the transmit path so defer the actual transmission to a suitable worker. | |
1086 | */ | |
1087 | int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) | |
1088 | { | |
1089 | struct efx_ptp_data *ptp = efx->ptp_data; | |
1090 | ||
1091 | skb_queue_tail(&ptp->txq, skb); | |
1092 | ||
1093 | if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) && | |
1094 | (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM)) | |
1095 | efx_xmit_hwtstamp_pending(skb); | |
1096 | queue_work(ptp->workwq, &ptp->work); | |
1097 | ||
1098 | return NETDEV_TX_OK; | |
1099 | } | |
1100 | ||
/* Switch PTP on/off and/or change the operating mode.
 *
 * A no-op if neither the enabled state nor (when enabling) the mode
 * would change.  A mode change while enabled requires a stop/start
 * cycle.  After a successful start, baseline host/NIC synchronisation
 * must succeed or PTP is stopped again and the error returned.
 * ptp_data->enabled is only updated once the transition succeeds.
 */
static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
			       unsigned int new_mode)
{
	if ((enable_wanted != efx->ptp_data->enabled) ||
	    (enable_wanted && (efx->ptp_data->mode != new_mode))) {
		int rc;

		if (enable_wanted) {
			/* Change of mode requires disable */
			if (efx->ptp_data->enabled &&
			    (efx->ptp_data->mode != new_mode)) {
				efx->ptp_data->enabled = false;
				rc = efx_ptp_stop(efx);
				if (rc != 0)
					return rc;
			}

			/* Set new operating mode and establish
			 * baseline synchronisation, which must
			 * succeed.
			 */
			efx->ptp_data->mode = new_mode;
			rc = efx_ptp_start(efx);
			if (rc == 0) {
				rc = efx_ptp_synchronize(efx,
							 PTP_SYNC_ATTEMPTS * 2);
				if (rc != 0)
					efx_ptp_stop(efx);
			}
		} else {
			rc = efx_ptp_stop(efx);
		}

		if (rc != 0)
			return rc;

		efx->ptp_data->enabled = enable_wanted;
	}

	return 0;
}
1142 | ||
1143 | static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) | |
1144 | { | |
1145 | bool enable_wanted = false; | |
1146 | unsigned int new_mode; | |
1147 | int rc; | |
1148 | ||
1149 | if (init->flags) | |
1150 | return -EINVAL; | |
1151 | ||
1152 | if ((init->tx_type != HWTSTAMP_TX_OFF) && | |
1153 | (init->tx_type != HWTSTAMP_TX_ON)) | |
1154 | return -ERANGE; | |
1155 | ||
1156 | new_mode = efx->ptp_data->mode; | |
1157 | /* Determine whether any PTP HW operations are required */ | |
1158 | switch (init->rx_filter) { | |
1159 | case HWTSTAMP_FILTER_NONE: | |
1160 | break; | |
1161 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: | |
1162 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: | |
1163 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: | |
1164 | init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; | |
1165 | new_mode = MC_CMD_PTP_MODE_V1; | |
1166 | enable_wanted = true; | |
1167 | break; | |
1168 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: | |
1169 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: | |
1170 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: | |
1171 | /* Although these three are accepted only IPV4 packets will be | |
1172 | * timestamped | |
1173 | */ | |
1174 | init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; | |
c939a316 | 1175 | new_mode = MC_CMD_PTP_MODE_V2_ENHANCED; |
7c236c43 SH |
1176 | enable_wanted = true; |
1177 | break; | |
1178 | case HWTSTAMP_FILTER_PTP_V2_EVENT: | |
1179 | case HWTSTAMP_FILTER_PTP_V2_SYNC: | |
1180 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: | |
1181 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: | |
1182 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: | |
1183 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: | |
1184 | /* Non-IP + IPv6 timestamping not supported */ | |
1185 | return -ERANGE; | |
1186 | break; | |
1187 | default: | |
1188 | return -ERANGE; | |
1189 | } | |
1190 | ||
1191 | if (init->tx_type != HWTSTAMP_TX_OFF) | |
1192 | enable_wanted = true; | |
1193 | ||
c939a316 LE |
1194 | /* Old versions of the firmware do not support the improved |
1195 | * UUID filtering option (SF bug 33070). If the firmware does | |
1196 | * not accept the enhanced mode, fall back to the standard PTP | |
1197 | * v2 UUID filtering. | |
1198 | */ | |
7c236c43 | 1199 | rc = efx_ptp_change_mode(efx, enable_wanted, new_mode); |
c939a316 LE |
1200 | if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED)) |
1201 | rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2); | |
7c236c43 SH |
1202 | if (rc != 0) |
1203 | return rc; | |
1204 | ||
1205 | efx->ptp_data->config = *init; | |
1206 | ||
1207 | return 0; | |
1208 | } | |
1209 | ||
62ebac92 | 1210 | void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) |
7c236c43 | 1211 | { |
7c236c43 SH |
1212 | struct efx_ptp_data *ptp = efx->ptp_data; |
1213 | ||
1214 | if (!ptp) | |
62ebac92 | 1215 | return; |
7c236c43 | 1216 | |
62ebac92 BH |
1217 | ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | |
1218 | SOF_TIMESTAMPING_RX_HARDWARE | | |
1219 | SOF_TIMESTAMPING_RAW_HARDWARE); | |
7c236c43 SH |
1220 | ts_info->phc_index = ptp_clock_index(ptp->phc_clock); |
1221 | ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON; | |
1222 | ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE | | |
1223 | 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | | |
1224 | 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | | |
1225 | 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | | |
1226 | 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | | |
1227 | 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | | |
1228 | 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); | |
7c236c43 SH |
1229 | } |
1230 | ||
1231 | int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd) | |
1232 | { | |
1233 | struct hwtstamp_config config; | |
1234 | int rc; | |
1235 | ||
1236 | /* Not a PTP enabled port */ | |
1237 | if (!efx->ptp_data) | |
1238 | return -EOPNOTSUPP; | |
1239 | ||
1240 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) | |
1241 | return -EFAULT; | |
1242 | ||
1243 | rc = efx_ptp_ts_init(efx, &config); | |
1244 | if (rc != 0) | |
1245 | return rc; | |
1246 | ||
1247 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) | |
1248 | ? -EFAULT : 0; | |
1249 | } | |
1250 | ||
1251 | static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len) | |
1252 | { | |
1253 | struct efx_ptp_data *ptp = efx->ptp_data; | |
1254 | ||
1255 | netif_err(efx, hw, efx->net_dev, | |
1256 | "PTP unexpected event length: got %d expected %d\n", | |
1257 | ptp->evt_frag_idx, expected_frag_len); | |
1258 | ptp->reset_required = true; | |
1259 | queue_work(ptp->workwq, &ptp->work); | |
1260 | } | |
1261 | ||
1262 | /* Process a completed receive event. Put it on the event queue and | |
1263 | * start worker thread. This is required because event and their | |
1264 | * correspoding packets may come in either order. | |
1265 | */ | |
1266 | static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) | |
1267 | { | |
1268 | struct efx_ptp_event_rx *evt = NULL; | |
1269 | ||
1270 | if (ptp->evt_frag_idx != 3) { | |
1271 | ptp_event_failure(efx, 3); | |
1272 | return; | |
1273 | } | |
1274 | ||
1275 | spin_lock_bh(&ptp->evt_lock); | |
1276 | if (!list_empty(&ptp->evt_free_list)) { | |
1277 | evt = list_first_entry(&ptp->evt_free_list, | |
1278 | struct efx_ptp_event_rx, link); | |
1279 | list_del(&evt->link); | |
1280 | ||
1281 | evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA); | |
1282 | evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2], | |
1283 | MCDI_EVENT_SRC) | | |
1284 | (EFX_QWORD_FIELD(ptp->evt_frags[1], | |
1285 | MCDI_EVENT_SRC) << 8) | | |
1286 | (EFX_QWORD_FIELD(ptp->evt_frags[0], | |
1287 | MCDI_EVENT_SRC) << 16)); | |
1288 | evt->hwtimestamp = ktime_set( | |
1289 | EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA), | |
1290 | EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA)); | |
1291 | evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); | |
1292 | list_add_tail(&evt->link, &ptp->evt_list); | |
1293 | ||
1294 | queue_work(ptp->workwq, &ptp->work); | |
1295 | } else { | |
1296 | netif_err(efx, rx_err, efx->net_dev, "No free PTP event"); | |
1297 | } | |
1298 | spin_unlock_bh(&ptp->evt_lock); | |
1299 | } | |
1300 | ||
1301 | static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp) | |
1302 | { | |
1303 | int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA); | |
1304 | if (ptp->evt_frag_idx != 1) { | |
1305 | ptp_event_failure(efx, 1); | |
1306 | return; | |
1307 | } | |
1308 | ||
1309 | netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code); | |
1310 | } | |
1311 | ||
1312 | static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp) | |
1313 | { | |
1314 | if (ptp->nic_ts_enabled) | |
1315 | queue_work(ptp->pps_workwq, &ptp->pps_work); | |
1316 | } | |
1317 | ||
1318 | void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) | |
1319 | { | |
1320 | struct efx_ptp_data *ptp = efx->ptp_data; | |
1321 | int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); | |
1322 | ||
1323 | if (!ptp->enabled) | |
1324 | return; | |
1325 | ||
1326 | if (ptp->evt_frag_idx == 0) { | |
1327 | ptp->evt_code = code; | |
1328 | } else if (ptp->evt_code != code) { | |
1329 | netif_err(efx, hw, efx->net_dev, | |
1330 | "PTP out of sequence event %d\n", code); | |
1331 | ptp->evt_frag_idx = 0; | |
1332 | } | |
1333 | ||
1334 | ptp->evt_frags[ptp->evt_frag_idx++] = *ev; | |
1335 | if (!MCDI_EVENT_FIELD(*ev, CONT)) { | |
1336 | /* Process resulting event */ | |
1337 | switch (code) { | |
1338 | case MCDI_EVENT_CODE_PTP_RX: | |
1339 | ptp_event_rx(efx, ptp); | |
1340 | break; | |
1341 | case MCDI_EVENT_CODE_PTP_FAULT: | |
1342 | ptp_event_fault(efx, ptp); | |
1343 | break; | |
1344 | case MCDI_EVENT_CODE_PTP_PPS: | |
1345 | ptp_event_pps(efx, ptp); | |
1346 | break; | |
1347 | default: | |
1348 | netif_err(efx, hw, efx->net_dev, | |
1349 | "PTP unknown event %d\n", code); | |
1350 | break; | |
1351 | } | |
1352 | ptp->evt_frag_idx = 0; | |
1353 | } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) { | |
1354 | netif_err(efx, hw, efx->net_dev, | |
1355 | "PTP too many event fragments\n"); | |
1356 | ptp->evt_frag_idx = 0; | |
1357 | } | |
1358 | } | |
1359 | ||
1360 | static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) | |
1361 | { | |
1362 | struct efx_ptp_data *ptp_data = container_of(ptp, | |
1363 | struct efx_ptp_data, | |
1364 | phc_clock_info); | |
1365 | struct efx_nic *efx = ptp_data->channel->efx; | |
59cfc479 | 1366 | MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN); |
7c236c43 SH |
1367 | s64 adjustment_ns; |
1368 | int rc; | |
1369 | ||
1370 | if (delta > MAX_PPB) | |
1371 | delta = MAX_PPB; | |
1372 | else if (delta < -MAX_PPB) | |
1373 | delta = -MAX_PPB; | |
1374 | ||
1375 | /* Convert ppb to fixed point ns. */ | |
1376 | adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >> | |
1377 | (PPB_EXTRA_BITS + MAX_PPB_BITS)); | |
1378 | ||
1379 | MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); | |
338f74df | 1380 | MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns); |
7c236c43 SH |
1381 | MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); |
1382 | MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); | |
1383 | rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), | |
1384 | NULL, 0, NULL); | |
1385 | if (rc != 0) | |
1386 | return rc; | |
1387 | ||
1388 | ptp_data->current_adjfreq = delta; | |
1389 | return 0; | |
1390 | } | |
1391 | ||
1392 | static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) | |
1393 | { | |
1394 | struct efx_ptp_data *ptp_data = container_of(ptp, | |
1395 | struct efx_ptp_data, | |
1396 | phc_clock_info); | |
1397 | struct efx_nic *efx = ptp_data->channel->efx; | |
1398 | struct timespec delta_ts = ns_to_timespec(delta); | |
59cfc479 | 1399 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN); |
7c236c43 SH |
1400 | |
1401 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); | |
338f74df | 1402 | MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0); |
7c236c43 SH |
1403 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); |
1404 | MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); | |
1405 | return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), | |
1406 | NULL, 0, NULL); | |
1407 | } | |
1408 | ||
1409 | static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) | |
1410 | { | |
1411 | struct efx_ptp_data *ptp_data = container_of(ptp, | |
1412 | struct efx_ptp_data, | |
1413 | phc_clock_info); | |
1414 | struct efx_nic *efx = ptp_data->channel->efx; | |
59cfc479 BH |
1415 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN); |
1416 | MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN); | |
7c236c43 SH |
1417 | int rc; |
1418 | ||
1419 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); | |
1420 | ||
1421 | rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), | |
1422 | outbuf, sizeof(outbuf), NULL); | |
1423 | if (rc != 0) | |
1424 | return rc; | |
1425 | ||
1426 | ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS); | |
1427 | ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS); | |
1428 | return 0; | |
1429 | } | |
1430 | ||
1431 | static int efx_phc_settime(struct ptp_clock_info *ptp, | |
1432 | const struct timespec *e_ts) | |
1433 | { | |
1434 | /* Get the current NIC time, efx_phc_gettime. | |
1435 | * Subtract from the desired time to get the offset | |
1436 | * call efx_phc_adjtime with the offset | |
1437 | */ | |
1438 | int rc; | |
1439 | struct timespec time_now; | |
1440 | struct timespec delta; | |
1441 | ||
1442 | rc = efx_phc_gettime(ptp, &time_now); | |
1443 | if (rc != 0) | |
1444 | return rc; | |
1445 | ||
1446 | delta = timespec_sub(*e_ts, time_now); | |
1447 | ||
56567c6f | 1448 | rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta)); |
7c236c43 SH |
1449 | if (rc != 0) |
1450 | return rc; | |
1451 | ||
1452 | return 0; | |
1453 | } | |
1454 | ||
1455 | static int efx_phc_enable(struct ptp_clock_info *ptp, | |
1456 | struct ptp_clock_request *request, | |
1457 | int enable) | |
1458 | { | |
1459 | struct efx_ptp_data *ptp_data = container_of(ptp, | |
1460 | struct efx_ptp_data, | |
1461 | phc_clock_info); | |
1462 | if (request->type != PTP_CLK_REQ_PPS) | |
1463 | return -EOPNOTSUPP; | |
1464 | ||
1465 | ptp_data->nic_ts_enabled = !!enable; | |
1466 | return 0; | |
1467 | } | |
1468 | ||
/* Channel-type operations for the dedicated PTP channel */
static const struct efx_channel_type efx_ptp_channel_type = {
	.handle_no_channel	= efx_ptp_handle_no_channel,
	.pre_probe		= efx_ptp_probe_channel,
	.post_remove		= efx_ptp_remove_channel,
	.get_name		= efx_ptp_get_channel_name,
	/* no copy operation; there is no need to reallocate this channel */
	.receive_skb		= efx_ptp_rx,
	.keep_eventq		= false,
};
1478 | ||
1479 | void efx_ptp_probe(struct efx_nic *efx) | |
1480 | { | |
1481 | /* Check whether PTP is implemented on this NIC. The DISABLE | |
1482 | * operation will succeed if and only if it is implemented. | |
1483 | */ | |
1484 | if (efx_ptp_disable(efx) == 0) | |
1485 | efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = | |
1486 | &efx_ptp_channel_type; | |
1487 | } |