// SPDX-License-Identifier: GPL-2.0+
/* PTP 1588 clock using the Renesas Ethernet AVB
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 */

#include "ravb.h"
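/* Issue a timestamp control request via GCCR.TCR: wait until no request
 * is pending, latch the request bits, then wait for completion.
 */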
static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
{
	struct net_device *ndev = priv->ndev;
	int error;

	error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
	if (error)
		return error;

	ravb_modify(ndev, GCCR, request, request);
	return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
}
/* Caller must hold the lock */
static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts)
{
	struct net_device *ndev = priv->ndev;
	int error;

	/* Capture the current gPTP time into the GCT registers */
	error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE);
	if (error)
		return error;

	ts->tv_nsec = ravb_read(ndev, GCT0);
	ts->tv_sec = ravb_read(ndev, GCT1) |
		     ((s64)ravb_read(ndev, GCT2) << 32);

	return 0;
}
/* Caller must hold the lock */
static int ravb_ptp_time_write(struct ravb_private *priv,
			       const struct timespec64 *ts)
{
	struct net_device *ndev = priv->ndev;
	int error;
	u32 gccr;

	error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET);
	if (error)
		return error;

	gccr = ravb_read(ndev, GCCR);
	if (gccr & GCCR_LTO)
		return -EBUSY;
	/* Load the new time into the offset registers and latch it */
	ravb_write(ndev, ts->tv_nsec, GTO0);
	ravb_write(ndev, ts->tv_sec, GTO1);
	ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2);
	ravb_write(ndev, gccr | GCCR_LTO, GCCR);

	return 0;
}
/* Caller must hold the lock */
static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
{
	struct net_device *ndev = priv->ndev;
	/* When the comparison value (GPTC.PTCV) is in range of
	 * [x-1 to x+1] (x is the configured increment value in
	 * GTI.TIV), it may happen that a comparison match is
	 * not detected when the timer wraps around.
	 */
	u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1;
	u32 gccr;

	if (ns < gti_ns_plus_1)
		ns = gti_ns_plus_1;
	else if (ns > 0 - gti_ns_plus_1)
		ns = 0 - gti_ns_plus_1;

	gccr = ravb_read(ndev, GCCR);
	if (gccr & GCCR_LPTC)
		return -EBUSY;
	ravb_write(ndev, ns, GPTC);
	ravb_write(ndev, gccr | GCCR_LPTC, GCCR);

	return 0;
}
/* PTP clock operations */
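/* Scale the default increment value (addend) by ppb parts per billion and
 * load the result into GTI.TIV under the driver lock.
 */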
static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	struct net_device *ndev = priv->ndev;
	unsigned long flags;
	u32 diff, addend;
	bool neg_adj = false;
	u32 gccr;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}
	addend = priv->ptp.default_addend;
	diff = div_u64((u64)addend * ppb, NSEC_PER_SEC);
	addend = neg_adj ? addend - diff : addend + diff;

	spin_lock_irqsave(&priv->lock, flags);
	priv->ptp.current_addend = addend;
	gccr = ravb_read(ndev, GCCR);
	if (gccr & GCCR_LTI) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return -EBUSY;
	}
	ravb_write(ndev, addend & GTI_TIV, GTI);
	ravb_write(ndev, gccr | GCCR_LTI, GCCR);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	struct timespec64 ts;
	unsigned long flags;
	int error;

	spin_lock_irqsave(&priv->lock, flags);
	error = ravb_ptp_time_read(priv, &ts);
	if (!error) {
		u64 now = ktime_to_ns(timespec64_to_ktime(ts));

		ts = ns_to_timespec64(now + delta);
		error = ravb_ptp_time_write(priv, &ts);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}
static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	unsigned long flags;
	int error;

	spin_lock_irqsave(&priv->lock, flags);
	error = ravb_ptp_time_read(priv, ts);
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

static int ravb_ptp_settime64(struct ptp_clock_info *ptp,
			      const struct timespec64 *ts)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	unsigned long flags;
	int error;

	spin_lock_irqsave(&priv->lock, flags);
	error = ravb_ptp_time_write(priv, ts);
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}
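/* Enable/disable external timestamp capture; the PTCF interrupt is
 * (un)masked via GIC, or via GIE/GID on SoCs with multiple IRQ lines.
 */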
static int ravb_ptp_extts(struct ptp_clock_info *ptp,
			  struct ptp_extts_request *req, int on)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	const struct ravb_hw_info *info = priv->info;
	struct net_device *ndev = priv->ndev;
	unsigned long flags;

	/* Reject requests with unsupported flags */
	if (req->flags & ~(PTP_ENABLE_FEATURE |
			   PTP_RISING_EDGE |
			   PTP_FALLING_EDGE |
			   PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	if (req->index)
		return -EINVAL;

	if (priv->ptp.extts[req->index] == on)
		return 0;
	priv->ptp.extts[req->index] = on;

	spin_lock_irqsave(&priv->lock, flags);
	if (!info->multi_irqs)
		ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
	else if (on)
		ravb_write(ndev, GIE_PTCS, GIE);
	else
		ravb_write(ndev, GID_PTCD, GID);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
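/* Program the periodic output: the first compare value is loaded into GPTC
 * here, subsequent matches are re-armed from the GIS.PTMF interrupt.
 */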
static int ravb_ptp_perout(struct ptp_clock_info *ptp,
			   struct ptp_perout_request *req, int on)
{
	struct ravb_private *priv = container_of(ptp, struct ravb_private,
						 ptp.info);
	const struct ravb_hw_info *info = priv->info;
	struct net_device *ndev = priv->ndev;
	struct ravb_ptp_perout *perout;
	unsigned long flags;
	int error = 0;

	/* Reject requests with unsupported flags */
	if (req->flags)
		return -EOPNOTSUPP;

	if (req->index)
		return -EINVAL;

	if (on) {
		u64 start_ns;
		u64 period_ns;

		start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
		period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;
		if (start_ns > U32_MAX) {
			netdev_warn(ndev,
				    "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n");
			return -ERANGE;
		}
		if (period_ns > U32_MAX) {
			netdev_warn(ndev,
				    "ptp: period value (nsec) is over limit. Maximum size of period is only 32 bits\n");
			return -ERANGE;
		}

		spin_lock_irqsave(&priv->lock, flags);
		perout = &priv->ptp.perout[req->index];
		perout->target = (u32)start_ns;
		perout->period = (u32)period_ns;
		error = ravb_ptp_update_compare(priv, (u32)start_ns);
		if (!error) {
			/* Unmask interrupt */
			if (!info->multi_irqs)
				ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
			else
				ravb_write(ndev, GIE_PTMS0, GIE);
		}
	} else {
		spin_lock_irqsave(&priv->lock, flags);
		perout = &priv->ptp.perout[req->index];
		perout->period = 0;

		/* Mask interrupt */
		if (!info->multi_irqs)
			ravb_modify(ndev, GIC, GIC_PTME, 0);
		else
			ravb_write(ndev, GID_PTMD0, GID);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}
static int ravb_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *req, int on)
{
	switch (req->type) {
	case PTP_CLK_REQ_EXTTS:
		return ravb_ptp_extts(ptp, &req->extts, on);
	case PTP_CLK_REQ_PEROUT:
		return ravb_ptp_perout(ptp, &req->perout, on);
	default:
		return -EOPNOTSUPP;
	}
}
static const struct ptp_clock_info ravb_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "ravb clock",
	.max_adj	= 50000000,
	.n_ext_ts	= N_EXT_TS,
	.n_per_out	= N_PER_OUT,
	.adjfreq	= ravb_ptp_adjfreq,
	.adjtime	= ravb_ptp_adjtime,
	.gettime64	= ravb_ptp_gettime64,
	.settime64	= ravb_ptp_settime64,
	.enable		= ravb_ptp_enable,
};
/* Caller must hold the lock */
void ravb_ptp_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 gis = ravb_read(ndev, GIS);

	gis &= ravb_read(ndev, GIC);
	if (gis & GIS_PTCF) {
		struct ptp_clock_event event;

		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = ravb_read(ndev, GCPT);
		ptp_clock_event(priv->ptp.clock, &event);
	}
	if (gis & GIS_PTMF) {
		struct ravb_ptp_perout *perout = priv->ptp.perout;

		if (perout->period) {
			perout->target += perout->period;
			ravb_ptp_update_compare(priv, perout->target);
		}
	}

	ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
}
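/* Initialize the PTP clock: use the increment value already programmed in
 * GTI as the default addend, select the adjustable gPTP timer (GCCR.TCSS)
 * and register the clock with the PTP subsystem.
 */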
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;

	priv->ptp.info = ravb_ptp_info;

	priv->ptp.default_addend = ravb_read(ndev, GTI);
	priv->ptp.current_addend = priv->ptp.default_addend;

	spin_lock_irqsave(&priv->lock, flags);
	ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
	ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
	spin_unlock_irqrestore(&priv->lock, flags);

	priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
}
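/* Mask and clear the gPTP interrupts before unregistering the PTP clock */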
void ravb_ptp_stop(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ravb_write(ndev, 0, GIC);
	ravb_write(ndev, 0, GIS);

	ptp_clock_unregister(priv->ptp.clock);
}