1 // SPDX-License-Identifier: GPL-2.0
2 /* Microchip KSZ PTP Implementation
4 * Copyright (C) 2020 ARRI Lighting
5 * Copyright (C) 2022 Microchip Technology Inc.
8 #include <linux/dsa/ksz_common.h>
10 #include <linux/irqdomain.h>
11 #include <linux/kernel.h>
12 #include <linux/ptp_classify.h>
13 #include <linux/ptp_clock_kernel.h>
15 #include "ksz_common.h"
17 #include "ksz_ptp_reg.h"
/* container_of() helpers to recover the driver-private structures from the
 * embedded members handed to the ptp_clock / kthread_work callbacks.
 */
19 #define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
20 #define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
21 #define work_to_xmit_work(w) \
22 container_of((w), struct ksz_deferred_xmit_work, work)
/* Maximum frequency adjustment accepted by adjfine(), in scaled ppm:
 * largest sub-nanosecond rate value the hardware register can hold,
 * converted to ppm of the 40 ns clock tick.
 */
24 /* Sub-nanoseconds-adj,max * sub-nanoseconds / 40ns * 1ns
25 * = (2^30-1) * (2 ^ 32) / 40 ns * 1 ns = 6249999
27 #define KSZ_MAX_DRIFT_CORR 6249999
/* Upper bound for the periodic-output pulse width (~125 ms). */
28 #define KSZ_MAX_PULSE_WIDTH 125000000LL
30 #define KSZ_PTP_INC_NS 40ULL /* HW clock is incremented every 40 ns (by 40) */
31 #define KSZ_PTP_SUBNS_BITS 32
/* Bit offset of the first PTP message interrupt inside the port's
 * TX interrupt status/mask registers (see ksz_ptp_irq_thread_fn()).
 */
33 #define KSZ_PTP_INT_START 13
/* Route the trigger-output unit onto the chip's GPIO/LED pins: drive the
 * GPIO as output, override both LED pins and select the PTP GPIO function
 * as their source.  Returns 0 or a negative register-access error.
 * NOTE(review): intermediate error checks between the register writes
 * appear elided in this excerpt -- confirm against the full source.
 */
35 static int ksz_ptp_tou_gpio(struct ksz_device *dev)
42 ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
47 ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
48 LED_OVR_1 | LED_OVR_2);
52 return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
53 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
54 LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
/* Reset trigger-output unit @unit: assert TRIG_RESET, then write-1-clear the
 * unit's "done" and interrupt status bits, and finally release the reset
 * while configuring the GPIO direction.  Returns 0 or a negative error.
 */
57 static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
62 /* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
63 ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
/* W1C the per-unit "trigger done" status bit. */
65 data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
66 ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
/* W1C the per-unit trigger interrupt status bit. */
70 data = FIELD_PREP(TRIG_INT_M, BIT(unit));
71 ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
75 /* Clear reset and set GPIO direction */
76 return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
/* Validate that @pulse_ns, expressed in the hardware's 8 ns units, fits in
 * the TRIG_PULSE_WIDTH_M register field.  Returns 0 if it fits, otherwise
 * a negative error (error path elided in this excerpt).
 */
80 static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
87 data = (pulse_ns / 8);
88 if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data))
/* Program the trigger unit's target time registers from @ts.  The hardware
 * seconds register is only 32 bits wide, so larger tv_sec values are
 * rejected.  Returns 0 or a negative error.
 */
94 static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
95 struct timespec64 const *ts)
99 /* Hardware has only 32 bit */
100 if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
103 ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
107 ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
/* Arm trigger-output unit @unit and check the hardware error flag for it.
 * On error the unit is merely reported; it will be reset on next access.
 * Returns 0 or a negative error.
 */
114 static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
119 ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
124 * - the ACTIVE flag is NOT cleared an error!
126 ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
/* Per-unit error flag: bit @unit of the TRIG_ERROR field. */
130 if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
131 dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
134 /* Unit will be reset on next access */
/* Program one trigger unit for periodic output: notification on, output on
 * GPO @index, positive-period pattern, the given cycle and pulse widths
 * (pulse width in 8 ns hardware units), infinite repetition, and the
 * first target time.  Returns 0 or a negative error.
 */
141 static int ksz_ptp_configure_perout(struct ksz_device *dev,
142 u32 cycle_width_ns, u32 pulse_width_ns,
143 struct timespec64 const *target_time,
149 data = FIELD_PREP(TRIG_NOTIFY, 1) |
150 FIELD_PREP(TRIG_GPO_M, index) |
151 FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
152 ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
156 ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
160 /* Set cycle count 0 - Infinite */
161 ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
/* Hardware counts the pulse width in 8 ns steps. */
165 data = (pulse_width_ns / 8);
166 ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
170 ret = ksz_ptp_tou_target_time_set(dev, target_time);
/* PTP_CLK_REQ_PEROUT handler body: validate the request, bind the trigger
 * unit to the pin found for PTP_PF_PEROUT, reset the unit, remember the
 * start time and period (used by ksz_ptp_restart_perout()), compute the
 * pulse width (explicit duty cycle, or 50% capped at KSZ_MAX_PULSE_WIDTH),
 * then configure, route to GPIO, and start the unit.
 * Caller holds ptp_data->lock.  Returns 0 or a negative error.
 * NOTE(review): the "on == 0" / disable path is elided in this excerpt.
 */
177 static int ksz_ptp_enable_perout(struct ksz_device *dev,
178 struct ptp_perout_request const *request,
181 struct ksz_ptp_data *ptp_data = &dev->ptp_data;
182 u64 req_pulse_width_ns;
/* Only the duty-cycle flag is supported; reject everything else. */
189 if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
/* The shared trigger unit must be free or already doing perout. */
192 if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
193 ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
196 pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
/* Select which GPIO pin and which trigger unit subsequent
 * REG_TRIG_* accesses refer to.
 */
200 data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
201 FIELD_PREP(PTP_TOU_INDEX, request->index);
202 ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
203 PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
207 ret = ksz_ptp_tou_reset(dev, request->index);
212 ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
/* Cache start/period so the output can be re-armed after settime/adjtime. */
216 ptp_data->perout_target_time_first.tv_sec = request->start.sec;
217 ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;
219 ptp_data->perout_period.tv_sec = request->period.sec;
220 ptp_data->perout_period.tv_nsec = request->period.nsec;
222 cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
223 if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
226 if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
227 pulse_width_ns = request->on.sec * NSEC_PER_SEC +
230 /* Use a duty cycle of 50%. Maximum pulse width supported by the
231 * hardware is a little bit more than 125 ms.
233 req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
234 request->period.nsec) / 2;
235 pulse_width_ns = min_t(u64, req_pulse_width_ns,
236 KSZ_MAX_PULSE_WIDTH);
239 ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
243 ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
244 &ptp_data->perout_target_time_first,
249 ret = ksz_ptp_tou_gpio(dev);
253 ret = ksz_ptp_tou_start(dev, request->index);
257 ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;
/* Enable or disable PTP message handling globally, depending on whether any
 * user port has TX or RX timestamping enabled: start/stop the aux worker
 * that tracks the PHC, tell the tagger whether to emit PTP tags, and flip
 * the PTP_ENABLE bit.  Returns 0 or a negative error.
 * NOTE(review): the tag_en computation inside the loop is elided here --
 * presumably it is set when any port has timestamping on; confirm upstream.
 */
264 struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
265 struct ksz_ptp_data *ptp_data = &dev->ptp_data;
266 struct ksz_port *prt;
270 dsa_switch_for_each_user_port(dp, dev->ds) {
271 prt = &dev->ports[dp->index];
272 if (prt->hwts_tx_en || prt->hwts_rx_en) {
/* Worker keeps ptp_data->clock_time fresh for timestamp reconstruction. */
279 ptp_schedule_worker(ptp_data->clock, 0);
281 ptp_cancel_worker_sync(ptp_data->clock);
284 tagger_data->hwtstamp_set_state(dev->ds, tag_en);
286 return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
287 tag_en ? PTP_ENABLE : 0);
/* The function is return back the capability of timestamping feature when
 * requested through ethtool -T <interface> utility
 * (DSA .get_ts_info hook; reports -ENODEV semantics via early return when
 * no PHC was registered -- return statement elided in this excerpt).
 */
293 int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *ts)
295 struct ksz_device *dev = ds->priv;
296 struct ksz_ptp_data *ptp_data;
298 ptp_data = &dev->ptp_data;
300 if (!ptp_data->clock)
303 ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
304 SOF_TIMESTAMPING_RX_HARDWARE |
305 SOF_TIMESTAMPING_RAW_HARDWARE;
/* One-step P2P is always available; two-step TX only on some chips
 * (guarding condition elided in this excerpt -- likely lan937x).
 */
307 ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);
310 ts->tx_types |= BIT(HWTSTAMP_TX_ON);
312 ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
313 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
314 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
315 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
317 ts->phc_index = ptp_clock_index(ptp_data->clock);
/* DSA .port_hwtstamp_get hook: copy the port's cached hwtstamp_config back
 * to user space.  Returns 0 on success or -EFAULT on copy failure
 * (the -EFAULT arm of the ternary is elided in this excerpt).
 */
322 int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
324 struct ksz_device *dev = ds->priv;
325 struct hwtstamp_config *config;
326 struct ksz_port *prt;
328 prt = &dev->ports[port];
329 config = &prt->tstamp_config;
331 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
/* Apply a hwtstamp_config to @prt: per-message-type timestamp-IRQ enables
 * and the one-step/two-step mode bit for TX, and the rx_filter coarsening
 * for RX (per-SYNC filters are widened to the corresponding EVENT filter).
 * Finally re-evaluates global PTP enablement via ksz_ptp_enable_mode().
 * Returns 0 or a negative error.  Break statements between the case arms
 * are elided in this excerpt.
 */
335 static int ksz_set_hwtstamp_config(struct ksz_device *dev,
336 struct ksz_port *prt,
337 struct hwtstamp_config *config)
344 switch (config->tx_type) {
345 case HWTSTAMP_TX_OFF:
346 prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
347 prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
348 prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
349 prt->hwts_tx_en = false;
/* One-step P2P: only the Pdelay_Req egress timestamp is needed;
 * hardware inserts the Pdelay_Resp correction itself.
 */
351 case HWTSTAMP_TX_ONESTEP_P2P:
352 prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
353 prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
354 prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
355 prt->hwts_tx_en = true;
357 ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
/* Full two-step TX timestamping is a lan937x-only feature. */
363 if (!is_lan937x(dev))
366 prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = true;
367 prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
368 prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
369 prt->hwts_tx_en = true;
371 ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
380 switch (config->rx_filter) {
381 case HWTSTAMP_FILTER_NONE:
382 prt->hwts_rx_en = false;
384 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
385 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
386 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
387 prt->hwts_rx_en = true;
389 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
390 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
391 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
392 prt->hwts_rx_en = true;
394 case HWTSTAMP_FILTER_PTP_V2_EVENT:
395 case HWTSTAMP_FILTER_PTP_V2_SYNC:
396 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
397 prt->hwts_rx_en = true;
/* Unsupported filter: report back NONE (default arm elided here). */
400 config->rx_filter = HWTSTAMP_FILTER_NONE;
404 return ksz_ptp_enable_mode(dev);
/* DSA .port_hwtstamp_set hook: copy the config from user space, apply it
 * via ksz_set_hwtstamp_config(), cache the accepted config on the port,
 * and copy the (possibly coarsened) config back to the caller.
 * Returns 0 or a negative error.
 */
407 int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
409 struct ksz_device *dev = ds->priv;
410 struct hwtstamp_config config;
411 struct ksz_port *prt;
414 prt = &dev->ports[port];
416 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
419 ret = ksz_set_hwtstamp_config(dev, prt, &config);
/* Only persist the config once the hardware accepted it. */
423 memcpy(&prt->tstamp_config, &config, sizeof(config));
425 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
/* Reconstruct a full 64-bit time from the partial (2-bit seconds + ns)
 * timestamp @tstamp delivered by the hardware, using the cached PHC time
 * that the aux worker refreshes every second.  The nearest-point-in-time
 * adjustment (+/- 4 s, bodies elided in this excerpt) resolves the 2-bit
 * seconds wrap-around.  Returns the reconstructed ktime.
 */
433 struct timespec64 ptp_clock_time;
434 struct ksz_ptp_data *ptp_data;
435 struct timespec64 diff;
436 struct timespec64 ts;
438 ptp_data = &dev->ptp_data;
439 ts = ktime_to_timespec64(tstamp);
/* clock_time is written from process and timer context; take the lock. */
441 spin_lock_bh(&ptp_data->clock_lock);
442 ptp_clock_time = ptp_data->clock_time;
443 spin_unlock_bh(&ptp_data->clock_lock);
445 /* calculate full time from partial time stamp */
446 ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;
448 /* find nearest possible point in time */
449 diff = timespec64_sub(ts, ptp_clock_time);
452 else if (diff.tv_sec < -2)
455 return timespec64_to_ktime(ts);
/* DSA .port_rxtstamp hook: attach the reconstructed RX hardware timestamp
 * to @skb.  In one-step P2P mode, additionally subtract the partial ingress
 * timestamp from the correction field of Pdelay_Req messages, so that the
 * hardware's later egress-time insertion on the Pdelay_Resp yields the
 * correct turnaround time.  Return value/semantics partially elided here.
 */
458 bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
461 struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
462 struct ksz_device *dev = ds->priv;
463 struct ptp_header *ptp_hdr;
464 struct ksz_port *prt;
469 prt = &dev->ports[port];
/* Partial timestamp was stashed by the tagger on receive. */
471 tstamp = KSZ_SKB_CB(skb)->tstamp;
472 memset(hwtstamps, 0, sizeof(*hwtstamps));
473 hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);
475 if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
478 ptp_hdr = ptp_parse_header(skb, type);
482 ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
483 if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
486 /* Only subtract the partial time stamp from the correction field. When
487 * the hardware adds the egress time stamp to the correction field of
488 * the PDelay_Resp message on tx, also only the partial time stamp will
/* Correction field is a 2^-16 ns fixed-point value, hence the << 16. */
491 correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
492 correction -= ktime_to_ns(tstamp) << 16;
494 ptp_header_update_correction(skb, type, ptp_hdr, correction);
/* DSA .port_txtstamp hook: classify the outgoing PTP message and, when a
 * TX timestamp is required for it under the current tx_type, clone the skb
 * so the tagger / deferred-xmit path can complete the timestamp later.
 * In one-step P2P mode, Pdelay_Resp is flagged for on-the-fly correction
 * update instead.  Early-return paths are elided in this excerpt.
 */
500 void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
502 struct ksz_device *dev = ds->priv;
503 struct ptp_header *hdr;
504 struct sk_buff *clone;
505 struct ksz_port *prt;
509 prt = &dev->ports[port];
511 if (!prt->hwts_tx_en)
514 type = ptp_classify_raw(skb);
515 if (type == PTP_CLASS_NONE)
518 hdr = ptp_parse_header(skb, type);
522 ptp_msg_type = ptp_get_msgtype(hdr, type);
524 switch (ptp_msg_type) {
/* In one-step mode SYNC needs no software TX timestamp. */
525 case PTP_MSGTYPE_SYNC:
526 if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
529 case PTP_MSGTYPE_PDELAY_REQ:
531 case PTP_MSGTYPE_PDELAY_RESP:
532 if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
533 KSZ_SKB_CB(skb)->ptp_type = type;
534 KSZ_SKB_CB(skb)->update_correction = true;
543 clone = skb_clone_sk(skb);
547 /* caching the value to be used in tag_ksz.c */
548 KSZ_SKB_CB(skb)->clone = clone;
/* Wait (up to 100 ms) for the PTP message interrupt to deliver the egress
 * timestamp for @skb, then complete the socket TX timestamp with it.
 * The timeout-handling branch is elided in this excerpt.
 */
551 static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
552 struct ksz_port *prt, struct sk_buff *skb)
554 struct skb_shared_hwtstamps hwtstamps = {};
557 /* timeout must include DSA conduit to transmit data, tstamp latency,
558 * IRQ latency and time for reading the time stamp.
560 ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
561 msecs_to_jiffies(100));
/* tstamp_msg was filled in by ksz_ptp_msg_thread_fn(). */
565 hwtstamps.hwtstamp = prt->tstamp_msg;
566 skb_complete_tx_timestamp(skb, &hwtstamps);
/* kthread work handler for deferred transmission of PTP frames that need a
 * TX timestamp: mark the clone as in-progress, reset the completion that
 * the timestamp IRQ will signal, send the frame, then block in
 * ksz_ptp_txtstamp_skb() until the timestamp arrives (or times out).
 */
569 void ksz_port_deferred_xmit(struct kthread_work *work)
571 struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
572 struct sk_buff *clone, *skb = xmit_work->skb;
573 struct dsa_switch *ds = xmit_work->dp->ds;
574 struct ksz_device *dev = ds->priv;
575 struct ksz_port *prt;
577 prt = &dev->ports[xmit_work->dp->index];
579 clone = KSZ_SKB_CB(skb)->clone;
581 skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
/* Must be re-armed before the frame goes out, to avoid a lost wakeup. */
583 reinit_completion(&prt->tstamp_msg_comp);
585 dsa_enqueue_skb(skb, skb->dev);
587 ksz_ptp_txtstamp_skb(dev, prt, clone);
/* Read the PHC into @ts: latch the running clock into shadow registers with
 * PTP_READ_TIME, then read phase, nanoseconds and seconds.  The sub-ns
 * phase counts 8 ns steps, hence the * 8.  Caller holds ptp_data->lock.
 * Returns 0 or a negative error.
 */
592 static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
599 /* Copy current PTP clock into shadow registers and read */
600 ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
604 ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
608 ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
612 ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
616 ts->tv_sec = seconds;
617 ts->tv_nsec = nanoseconds + phase * 8;
/* ptp_clock_info .gettime64 callback: serialize hardware access with
 * ptp_data->lock around _ksz_ptp_gettime().  Returns 0 or a negative error.
 */
622 static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
624 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
625 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
628 mutex_lock(&ptp_data->lock);
629 ret = _ksz_ptp_gettime(dev, ts);
630 mutex_unlock(&ptp_data->lock);
/* Re-arm the periodic output after the PHC has been stepped (settime /
 * adjtime): compute the next event edge from the original start time and
 * period, keep a 100 ms guard before it, and re-enable perout with the
 * recalculated start.  Caller holds ptp_data->lock.
 * Returns 0 or a negative error.
 * NOTE(review): the first_ns >= now_ns branch and the flags/index fields
 * of the rebuilt request are elided in this excerpt.
 */
637 struct ksz_ptp_data *ptp_data = &dev->ptp_data;
638 s64 now_ns, first_ns, period_ns, next_ns;
639 struct ptp_perout_request request;
640 struct timespec64 next;
641 struct timespec64 now;
645 dev_info(dev->dev, "Restarting periodic output signal\n");
647 ret = _ksz_ptp_gettime(dev, &now);
651 now_ns = timespec64_to_ns(&now);
652 first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);
654 /* Calculate next perout event based on start time and period */
655 period_ns = timespec64_to_ns(&ptp_data->perout_period);
657 if (first_ns < now_ns) {
/* Number of whole periods already elapsed since the start time. */
658 count = div_u64(now_ns - first_ns, period_ns);
659 next_ns = first_ns + count * period_ns;
664 /* Ensure 100 ms guard time prior next event */
665 while (next_ns < now_ns + 100000000)
666 next_ns += period_ns;
668 /* Restart periodic output signal */
669 next = ns_to_timespec64(next_ns);
670 request.start.sec = next.tv_sec;
671 request.start.nsec = next.tv_nsec;
672 request.period.sec = ptp_data->perout_period.tv_sec;
673 request.period.nsec = ptp_data->perout_period.tv_nsec;
677 return ksz_ptp_enable_perout(dev, &request, 1);
/* ptp_clock_info .settime64 callback: load @ts into the PHC via the shadow
 * registers + PTP_LOAD_TIME strobe, restart the periodic output if one is
 * active (it is phase-referenced to the old time), and refresh the cached
 * clock_time used for timestamp reconstruction.
 * Returns 0 or a negative error.
 */
680 static int ksz_ptp_settime(struct ptp_clock_info *ptp,
681 const struct timespec64 *ts)
683 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
684 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
687 mutex_lock(&ptp_data->lock);
689 /* Write to shadow registers and Load PTP clock */
690 ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
694 ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
698 ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
702 ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
706 switch (ptp_data->tou_mode) {
707 case KSZ_PTP_TOU_IDLE:
710 case KSZ_PTP_TOU_PEROUT:
711 ret = ksz_ptp_restart_perout(dev);
/* Keep the reconstruction cache coherent with the new PHC time. */
718 spin_lock_bh(&ptp_data->clock_lock);
719 ptp_data->clock_time = *ts;
720 spin_unlock_bh(&ptp_data->clock_lock);
723 mutex_unlock(&ptp_data->lock);
/* ptp_clock_info .adjfine callback: convert @scaled_ppm into the hardware's
 * sub-nanosecond rate adjustment (relative to the 40 ns tick, fixed-point
 * with KSZ_PTP_SUBNS_BITS fractional bits), write it with the direction
 * bit, and enable/disable rate adjustment depending on whether the value
 * is non-zero (the selecting condition is elided in this excerpt).
 * Returns 0 or a negative error.
 */
728 static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
730 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
731 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
737 mutex_lock(&ptp_data->lock);
/* base = 40 ns expressed in 2^-32 ns units; diff_by_scaled_ppm()
 * yields |adj| and whether the clock must run slower (negative).
 */
740 base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
741 negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);
744 data32 &= PTP_SUBNANOSEC_M;
746 data32 |= PTP_RATE_DIR;
748 ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
752 ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
757 ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
763 mutex_unlock(&ptp_data->lock);
/* ptp_clock_info .adjtime callback: step the PHC by @delta using the
 * hardware's add/subtract step mechanism (separate sec/nsec magnitudes
 * plus a direction bit), restart perout if active, and advance the cached
 * clock_time by the same delta.  Returns 0 or a negative error.
 */
767 static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
769 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
770 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
771 struct timespec64 delta64 = ns_to_timespec64(delta);
776 mutex_lock(&ptp_data->lock);
778 /* do not use ns_to_timespec64(),
779 * both sec and nsec are subtracted by hw
/* div_s64_rem keeps sec and nsec with the same sign as delta. */
781 sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);
783 ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
787 ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
791 ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
795 data16 |= PTP_STEP_ADJ;
797 /* PTP_STEP_DIR -- 0: subtract, 1: add */
799 data16 &= ~PTP_STEP_DIR;
801 data16 |= PTP_STEP_DIR;
803 ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
807 switch (ptp_data->tou_mode) {
808 case KSZ_PTP_TOU_IDLE:
811 case KSZ_PTP_TOU_PEROUT:
812 ret = ksz_ptp_restart_perout(dev);
/* Keep the reconstruction cache in step with the hardware clock. */
819 spin_lock_bh(&ptp_data->clock_lock);
820 ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
821 spin_unlock_bh(&ptp_data->clock_lock);
824 mutex_unlock(&ptp_data->lock);
/* ptp_clock_info .enable callback: dispatch on the request type.  Only
 * periodic output is handled here; other request types fall through to
 * a default (elided in this excerpt).  Returns 0 or a negative error.
 */
828 static int ksz_ptp_enable(struct ptp_clock_info *ptp,
829 struct ptp_clock_request *req, int on)
831 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
832 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
836 case PTP_CLK_REQ_PEROUT:
837 mutex_lock(&ptp_data->lock);
838 ret = ksz_ptp_enable_perout(dev, &req->perout, on);
839 mutex_unlock(&ptp_data->lock);
/* ptp_clock_info .verify callback: decide whether pin @pin may take on
 * function @func for channel @chan.
 * NOTE(review): the entire body is elided in this excerpt -- presumably it
 * accepts PTP_PF_NONE/PTP_PF_PEROUT and rejects the rest; confirm upstream.
 */
848 static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
849 enum ptp_pin_function func, unsigned int chan)
/* Function is pointer to the do_aux_work in the ptp_clock capability:
 * periodically reads the PHC and refreshes the cached clock_time that
 * ksz_tstamp_reconstruct() relies on.  Returns the delay (in jiffies)
 * until the next invocation.
 */
866 static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
868 struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
869 struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
870 struct timespec64 ts;
873 mutex_lock(&ptp_data->lock);
874 ret = _ksz_ptp_gettime(dev, &ts);
878 spin_lock_bh(&ptp_data->clock_lock);
879 ptp_data->clock_time = ts;
880 spin_unlock_bh(&ptp_data->clock_lock);
883 mutex_unlock(&ptp_data->lock);
885 return HZ; /* reschedule in 1 second */
/* Enable the hardware PTP clock and zero the cached software copy of its
 * time.  Returns 0 or a negative register-access error.
 */
888 static int ksz_ptp_start_clock(struct ksz_device *dev)
890 struct ksz_ptp_data *ptp_data = &dev->ptp_data;
893 ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
897 ptp_data->clock_time.tv_sec = 0;
898 ptp_data->clock_time.tv_nsec = 0;
/* Register the switch's PHC with the PTP subsystem: initialize locks, fill
 * in the ptp_clock_info capabilities and pin table, start the hardware
 * clock, configure P2P transparent-clock + 802.1AS forwarding, and finally
 * create the clock device.  Returns 0 or a negative error.
 */
903 int ksz_ptp_clock_register(struct dsa_switch *ds)
905 struct ksz_device *dev = ds->priv;
906 struct ksz_ptp_data *ptp_data;
910 ptp_data = &dev->ptp_data;
911 mutex_init(&ptp_data->lock);
912 spin_lock_init(&ptp_data->clock_lock);
914 ptp_data->caps.owner = THIS_MODULE;
915 snprintf(ptp_data->caps.name, 16, "Microchip Clock");
916 ptp_data->caps.max_adj = KSZ_MAX_DRIFT_CORR;
917 ptp_data->caps.gettime64 = ksz_ptp_gettime;
918 ptp_data->caps.settime64 = ksz_ptp_settime;
919 ptp_data->caps.adjfine = ksz_ptp_adjfine;
920 ptp_data->caps.adjtime = ksz_ptp_adjtime;
921 ptp_data->caps.do_aux_work = ksz_ptp_do_aux_work;
922 ptp_data->caps.enable = ksz_ptp_enable;
923 ptp_data->caps.verify = ksz_ptp_verify_pin;
924 ptp_data->caps.n_pins = KSZ_PTP_N_GPIO;
925 ptp_data->caps.n_per_out = 3;
927 ret = ksz_ptp_start_clock(dev);
/* Describe each GPIO as an initially-unassigned PTP pin. */
931 for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
932 struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];
934 snprintf(ptp_pin->name,
935 sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
937 ptp_pin->func = PTP_PF_NONE;
940 ptp_data->caps.pin_config = ptp_data->pin_config;
942 /* Currently only P2P mode is supported. When 802_1AS bit is set, it
943 * forwards all PTP packets to host port and none to other ports.
945 ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
946 PTP_TC_P2P | PTP_802_1AS);
/* NOTE(review): on CONFIG_PTP_1588_CLOCK=n, ptp_clock_register() returns
 * NULL and PTR_ERR(NULL) is 0 -- PHC absence is then treated as success
 * with ptp_data->clock left NULL (checked by ksz_get_ts_info()).
 */
950 ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
951 if (IS_ERR_OR_NULL(ptp_data->clock))
952 return PTR_ERR(ptp_data->clock);
/* Unregister the PHC created by ksz_ptp_clock_register().  A guard against
 * a NULL/error clock appears elided in this excerpt.
 */
957 void ksz_ptp_clock_unregister(struct dsa_switch *ds)
959 struct ksz_device *dev = ds->priv;
960 struct ksz_ptp_data *ptp_data;
962 ptp_data = &dev->ptp_data;
965 ptp_clock_unregister(ptp_data->clock);
/* Threaded handler for one per-message-type PTP timestamp interrupt: when
 * timestamping is enabled for this message type, read the raw egress
 * timestamp register, decode and reconstruct it, store it on the port and
 * wake the waiter in ksz_ptp_txtstamp_skb().  Returns IRQ_HANDLED
 * (return statement elided in this excerpt).
 */
968 static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
970 struct ksz_ptp_irq *ptpmsg_irq = dev_id;
971 struct ksz_device *dev;
972 struct ksz_port *port;
977 port = ptpmsg_irq->port;
980 if (ptpmsg_irq->ts_en) {
981 ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
985 tstamp = ksz_decode_tstamp(tstamp_raw);
987 port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);
989 complete(&port->tstamp_msg_comp);
/* Threaded demultiplexer for the port's PTP interrupt: read the status
 * register, acknowledge all pending bits (write-1-to-clear), and dispatch
 * each set bit (offset by KSZ_PTP_INT_START) to its nested per-message
 * IRQ handler.  Returns IRQ_HANDLED if anything was dispatched.
 */
995 static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
997 struct ksz_irq *ptpirq = dev_id;
998 unsigned int nhandled = 0;
999 struct ksz_device *dev;
1000 unsigned int sub_irq;
1007 ret = ksz_read16(dev, ptpirq->reg_status, &data);
1011 /* Clear the interrupts W1C */
1012 ret = ksz_write16(dev, ptpirq->reg_status, data);
1016 for (n = 0; n < ptpirq->nirqs; ++n) {
1017 if (data & BIT(n + KSZ_PTP_INT_START)) {
1018 sub_irq = irq_find_mapping(ptpirq->domain, n);
1019 handle_nested_irq(sub_irq);
1025 return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
/* irq_chip .irq_mask: clear the hwirq's bit in the software mask cache;
 * the hardware register is written later in .irq_bus_sync_unlock.
 */
1028 static void ksz_ptp_irq_mask(struct irq_data *d)
1030 struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
1032 kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
/* irq_chip .irq_unmask: set the hwirq's bit in the software mask cache;
 * flushed to hardware in .irq_bus_sync_unlock.
 */
1035 static void ksz_ptp_irq_unmask(struct irq_data *d)
1037 struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
1039 kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
/* irq_chip .irq_bus_lock: serialize slow-bus (SPI/I2C) register access for
 * the mask/unmask sequence; released in .irq_bus_sync_unlock.
 */
1042 static void ksz_ptp_irq_bus_lock(struct irq_data *d)
1044 struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
1046 mutex_lock(&kirq->dev->lock_irq);
/* irq_chip .irq_bus_sync_unlock: flush the cached interrupt mask to the
 * hardware register, then release the bus lock taken in .irq_bus_lock.
 */
1049 static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
1051 struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
1052 struct ksz_device *dev = kirq->dev;
1055 ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
1057 dev_err(dev->dev, "failed to change IRQ mask\n");
1059 mutex_unlock(&dev->lock_irq);
/* irq_chip for the per-message PTP interrupts; uses bus_lock/sync_unlock
 * because the mask register lives behind a slow (SPI/I2C) bus.
 */
1062 static const struct irq_chip ksz_ptp_irq_chip = {
1064 .irq_mask = ksz_ptp_irq_mask,
1065 .irq_unmask = ksz_ptp_irq_unmask,
1066 .irq_bus_lock = ksz_ptp_irq_bus_lock,
1067 .irq_bus_sync_unlock = ksz_ptp_irq_bus_sync_unlock,
/* irq_domain .map: wire each hwirq to the PTP irq_chip with level-type
 * handling and mark it non-probeable.
 */
1070 static int ksz_ptp_irq_domain_map(struct irq_domain *d,
1071 unsigned int irq, irq_hw_number_t hwirq)
1073 irq_set_chip_data(irq, d->host_data);
1074 irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
1075 irq_set_noprobe(irq);
/* Domain ops for the per-port PTP interrupt domain. */
1080 static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
1081 .map = ksz_ptp_irq_domain_map,
1082 .xlate = irq_domain_xlate_twocell,
/* Tear down the per-message-type IRQ @n on @port: release the handler and
 * dispose of the virq mapping created during setup.
 */
1085 static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
1087 struct ksz_ptp_irq *ptpmsg_irq;
1089 ptpmsg_irq = &port->ptpmsg_irq[n];
1091 free_irq(ptpmsg_irq->num, ptpmsg_irq);
1092 irq_dispose_mapping(ptpmsg_irq->num);
/* Set up the per-message-type IRQ @n on @port: record the port and the
 * chip-specific timestamp register for this message type, name it, find
 * its virq in the port's PTP domain, and install a threaded handler.
 * Returns 0 or a negative error.
 */
1095 static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
/* Index order matches the ptpmsg_irq[] slots (PDRES, XDREQ, SYNC). */
1097 u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS, REG_PTP_PORT_XDELAY_TS,
1098 REG_PTP_PORT_SYNC_TS};
1099 static const char * const name[] = {"pdresp-msg", "xdreq-msg",
1101 const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
1102 struct ksz_ptp_irq *ptpmsg_irq;
1104 ptpmsg_irq = &port->ptpmsg_irq[n];
1106 ptpmsg_irq->port = port;
1107 ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
1109 strscpy(ptpmsg_irq->name, name[n]);
1111 ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
1112 if (ptpmsg_irq->num < 0)
1113 return ptpmsg_irq->num;
1115 return request_threaded_irq(ptpmsg_irq->num, NULL,
1116 ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
1117 ptpmsg_irq->name, ptpmsg_irq);
/* Set up the PTP interrupt hierarchy for port @p: resolve the port's mask
 * and status register addresses, create a linear IRQ domain with one virq
 * per message type, hook the demux handler onto the parent port interrupt,
 * and install the per-message-type handlers.  Unwinds everything already
 * set up on failure.  Returns 0 or a negative error.
 */
1120 int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
1122 struct ksz_device *dev = ds->priv;
1123 const struct ksz_dev_ops *ops = dev->dev_ops;
1124 struct ksz_port *port = &dev->ports[p];
1125 struct ksz_irq *ptpirq = &port->ptpirq;
1132 ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
1133 ptpirq->reg_status = ops->get_port_addr(p,
1134 REG_PTP_PORT_TX_INT_STATUS__2);
1135 snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);
1137 init_completion(&port->tstamp_msg_comp);
1139 ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
1140 &ksz_ptp_irq_domain_ops, ptpirq);
1141 if (!ptpirq->domain)
1144 for (irq = 0; irq < ptpirq->nirqs; irq++)
1145 irq_create_mapping(ptpirq->domain, irq);
/* Parent line: the PTP source bit inside the port's main IRQ domain. */
1147 ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
1148 if (ptpirq->irq_num < 0) {
1149 ret = ptpirq->irq_num;
1153 ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
1154 IRQF_ONESHOT, ptpirq->name, ptpirq);
1158 for (irq = 0; irq < ptpirq->nirqs; irq++) {
1159 ret = ksz_ptp_msg_irq_setup(port, irq);
/* Error unwind: free handlers/mappings in reverse setup order.
 * (Goto labels are elided in this excerpt.)
 */
1167 free_irq(ptpirq->irq_num, ptpirq);
1169 free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
1171 for (irq = 0; irq < ptpirq->nirqs; irq++)
1172 irq_dispose_mapping(port->ptpmsg_irq[irq].num);
1174 irq_domain_remove(ptpirq->domain);
/* Tear down everything ksz_ptp_irq_setup() created for port @p, in reverse
 * order: per-message IRQs, the parent demux IRQ and its mapping, and
 * finally the IRQ domain.
 */
1179 void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
1181 struct ksz_device *dev = ds->priv;
1182 struct ksz_port *port = &dev->ports[p];
1183 struct ksz_irq *ptpirq = &port->ptpirq;
1186 for (n = 0; n < ptpirq->nirqs; n++)
1187 ksz_ptp_msg_irq_free(port, n);
1189 free_irq(ptpirq->irq_num, ptpirq);
1190 irq_dispose_mapping(ptpirq->irq_num);
1192 irq_domain_remove(ptpirq->domain);
/* Module metadata. */
1197 MODULE_DESCRIPTION("PTP support for KSZ switch");
1198 MODULE_LICENSE("GPL");