linux.git: drivers/firmware/arm_ffa/driver.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Arm Firmware Framework for ARMv8-A(FFA) interface driver
4  *
5  * The Arm FFA specification[1] describes a software architecture that
6  * leverages the virtualization extension to isolate software images
7  * provided by an ecosystem of vendors from each other and describes
8  * interfaces that standardize communication between the various software
9  * images including communication between images in the Secure world and
10  * Normal world. Any Hypervisor could use the FFA interfaces to enable
11  * communication between VMs it manages.
12  *
13  * The Hypervisor, a.k.a. the Partition Manager in FFA terminology, can
14  * assign system resources (memory regions, devices, CPU cycles) to the
15  * partitions and manage isolation amongst them.
16  *
17  * [1] https://developer.arm.com/docs/den0077/latest
18  *
19  * Copyright (C) 2021 ARM Ltd.
20  */
21
22 #define DRIVER_NAME "ARM FF-A"
23 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
24
25 #include <linux/acpi.h>
26 #include <linux/arm_ffa.h>
27 #include <linux/bitfield.h>
28 #include <linux/cpuhotplug.h>
29 #include <linux/device.h>
30 #include <linux/hashtable.h>
31 #include <linux/interrupt.h>
32 #include <linux/io.h>
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/mm.h>
36 #include <linux/mutex.h>
37 #include <linux/of_irq.h>
38 #include <linux/scatterlist.h>
39 #include <linux/slab.h>
40 #include <linux/smp.h>
41 #include <linux/uuid.h>
42 #include <linux/xarray.h>
43
44 #include "common.h"
45
46 #define FFA_DRIVER_VERSION      FFA_VERSION_1_1
47 #define FFA_MIN_VERSION         FFA_VERSION_1_0
48
49 #define SENDER_ID_MASK          GENMASK(31, 16)
50 #define RECEIVER_ID_MASK        GENMASK(15, 0)
51 #define SENDER_ID(x)            ((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
52 #define RECEIVER_ID(x)          ((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
53 #define PACK_TARGET_INFO(s, r)          \
54         (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
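/*
 * For example, PACK_TARGET_INFO(0x8001, 0x0001) packs sender 0x8001 and
 * receiver 0x0001 into 0x80010001; SENDER_ID() and RECEIVER_ID() recover
 * the two halves from such a packed value.
 */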
55
56 /*
57  * Keep the RX/TX buffer size as 4K for now.
58  * 64K may be preferred to keep it at least one page in a 64K PAGE_SIZE config.
59  */
60 #define RXTX_BUFFER_SIZE        SZ_4K
61
62 #define FFA_MAX_NOTIFICATIONS           64
63
64 static ffa_fn *invoke_ffa_fn;
65
66 static const int ffa_linux_errmap[] = {
67         /* better than a switch-case as long as the error values are contiguous */
68         0,              /* FFA_RET_SUCCESS */
69         -EOPNOTSUPP,    /* FFA_RET_NOT_SUPPORTED */
70         -EINVAL,        /* FFA_RET_INVALID_PARAMETERS */
71         -ENOMEM,        /* FFA_RET_NO_MEMORY */
72         -EBUSY,         /* FFA_RET_BUSY */
73         -EINTR,         /* FFA_RET_INTERRUPTED */
74         -EACCES,        /* FFA_RET_DENIED */
75         -EAGAIN,        /* FFA_RET_RETRY */
76         -ECANCELED,     /* FFA_RET_ABORTED */
77         -ENODATA,       /* FFA_RET_NO_DATA */
78 };
79
80 static inline int ffa_to_linux_errno(int errno)
81 {
82         int err_idx = -errno;
83
84         if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
85                 return ffa_linux_errmap[err_idx];
86         return -EINVAL;
87 }
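/*
 * For example, FFA_RET_BUSY is -4, so ffa_to_linux_errno(-4) indexes
 * ffa_linux_errmap[4] and returns -EBUSY; any value outside the table
 * falls back to -EINVAL.
 */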
88
89 struct ffa_pcpu_irq {
90         struct ffa_drv_info *info;
91 };
92
93 struct ffa_drv_info {
94         u32 version;
95         u16 vm_id;
96         struct mutex rx_lock; /* lock to protect Rx buffer */
97         struct mutex tx_lock; /* lock to protect Tx buffer */
98         void *rx_buffer;
99         void *tx_buffer;
100         bool mem_ops_native;
101         bool bitmap_created;
102         bool notif_enabled;
103         unsigned int sched_recv_irq;
104         unsigned int notif_pend_irq;
105         unsigned int cpuhp_state;
106         struct ffa_pcpu_irq __percpu *irq_pcpu;
107         struct workqueue_struct *notif_pcpu_wq;
108         struct work_struct notif_pcpu_work;
109         struct work_struct sched_recv_irq_work;
110         struct xarray partition_info;
111         DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
112         struct mutex notify_lock; /* lock to protect notifier hashtable  */
113 };
114
115 static struct ffa_drv_info *drv_info;
116 static void ffa_partitions_cleanup(void);
117
118 /*
119  * The driver must be able to support all the versions from the earliest
120  * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
121  * The specification states that if the firmware supports an FFA
122  * implementation that is incompatible with and at a greater version number
123  * than the one specified by the caller (FFA_DRIVER_VERSION passed as a
124  * parameter to FFA_VERSION), it must return the NOT_SUPPORTED error code.
125  */
126 static u32 ffa_compatible_version_find(u32 version)
127 {
128         u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
129         u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
130         u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);
131
132         if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
133                 return version;
134
135         pr_info("Firmware version higher than driver version, downgrading\n");
136         return FFA_DRIVER_VERSION;
137 }
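/*
 * For example, with FFA_DRIVER_VERSION at v1.1 (0x10001): firmware
 * reporting v1.0 (0x10000) is used as-is, while firmware reporting
 * v1.2 (0x10002) is downgraded and the driver negotiates v1.1.
 */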
138
139 static int ffa_version_check(u32 *version)
140 {
141         ffa_value_t ver;
142
143         invoke_ffa_fn((ffa_value_t){
144                       .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
145                       }, &ver);
146
147         if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
148                 pr_info("FFA_VERSION returned not supported\n");
149                 return -EOPNOTSUPP;
150         }
151
152         if (ver.a0 < FFA_MIN_VERSION) {
153                 pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
154                        FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
155                        FFA_MAJOR_VERSION(FFA_MIN_VERSION),
156                        FFA_MINOR_VERSION(FFA_MIN_VERSION));
157                 return -EINVAL;
158         }
159
160         pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
161                 FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
162         pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
163                 FFA_MINOR_VERSION(ver.a0));
164         *version = ffa_compatible_version_find(ver.a0);
165
166         return 0;
167 }
168
169 static int ffa_rx_release(void)
170 {
171         ffa_value_t ret;
172
173         invoke_ffa_fn((ffa_value_t){
174                       .a0 = FFA_RX_RELEASE,
175                       }, &ret);
176
177         if (ret.a0 == FFA_ERROR)
178                 return ffa_to_linux_errno((int)ret.a2);
179
180         /* check for ret.a0 == FFA_RX_RELEASE ? */
181
182         return 0;
183 }
184
185 static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
186 {
187         ffa_value_t ret;
188
189         invoke_ffa_fn((ffa_value_t){
190                       .a0 = FFA_FN_NATIVE(RXTX_MAP),
191                       .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
192                       }, &ret);
193
194         if (ret.a0 == FFA_ERROR)
195                 return ffa_to_linux_errno((int)ret.a2);
196
197         return 0;
198 }
199
200 static int ffa_rxtx_unmap(u16 vm_id)
201 {
202         ffa_value_t ret;
203
204         invoke_ffa_fn((ffa_value_t){
205                       .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
206                       }, &ret);
207
208         if (ret.a0 == FFA_ERROR)
209                 return ffa_to_linux_errno((int)ret.a2);
210
211         return 0;
212 }
213
214 #define PARTITION_INFO_GET_RETURN_COUNT_ONLY    BIT(0)
215
216 /* buffer must hold at least sizeof(struct ffa_partition_info) * num_partitions bytes */
217 static int
218 __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
219                          struct ffa_partition_info *buffer, int num_partitions)
220 {
221         int idx, count, flags = 0, sz, buf_sz;
222         ffa_value_t partition_info;
223
224         if (drv_info->version > FFA_VERSION_1_0 &&
225             (!buffer || !num_partitions)) /* Just get the count for now */
226                 flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
227
228         mutex_lock(&drv_info->rx_lock);
229         invoke_ffa_fn((ffa_value_t){
230                       .a0 = FFA_PARTITION_INFO_GET,
231                       .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
232                       .a5 = flags,
233                       }, &partition_info);
234
235         if (partition_info.a0 == FFA_ERROR) {
236                 mutex_unlock(&drv_info->rx_lock);
237                 return ffa_to_linux_errno((int)partition_info.a2);
238         }
239
240         count = partition_info.a2;
241
242         if (drv_info->version > FFA_VERSION_1_0) {
243                 buf_sz = sz = partition_info.a3;
244                 if (sz > sizeof(*buffer))
245                         buf_sz = sizeof(*buffer);
246         } else {
247                 /* FFA_VERSION_1_0 lacks size in the response */
248                 buf_sz = sz = 8;
249         }
250
251         if (buffer && count <= num_partitions)
252                 for (idx = 0; idx < count; idx++)
253                         memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
254                                buf_sz);
255
256         ffa_rx_release();
257
258         mutex_unlock(&drv_info->rx_lock);
259
260         return count;
261 }
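/*
 * For example, a v1.1 firmware returns the per-entry descriptor size in
 * a3 (24 bytes once the partition UUID is included); if that exceeds
 * sizeof(struct ffa_partition_info), only the leading buf_sz bytes of
 * each entry are copied. A v1.0 firmware returns no size, so the fixed
 * 8-byte v1.0 descriptor (ID, execution context count, properties) is
 * assumed.
 */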
262
263 /* buffer is allocated here; the caller must free it if the returned count > 0 */
264 static int
265 ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
266 {
267         int count;
268         u32 uuid0_4[4];
269         struct ffa_partition_info *pbuf;
270
271         export_uuid((u8 *)uuid0_4, uuid);
272         count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
273                                          uuid0_4[3], NULL, 0);
274         if (count <= 0)
275                 return count;
276
277         pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
278         if (!pbuf)
279                 return -ENOMEM;
280
281         count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
282                                          uuid0_4[3], pbuf, count);
283         if (count <= 0)
284                 kfree(pbuf);
285         else
286                 *buffer = pbuf;
287
288         return count;
289 }
290
291 #define VM_ID_MASK      GENMASK(15, 0)
292 static int ffa_id_get(u16 *vm_id)
293 {
294         ffa_value_t id;
295
296         invoke_ffa_fn((ffa_value_t){
297                       .a0 = FFA_ID_GET,
298                       }, &id);
299
300         if (id.a0 == FFA_ERROR)
301                 return ffa_to_linux_errno((int)id.a2);
302
303         *vm_id = FIELD_GET(VM_ID_MASK, (id.a2));
304
305         return 0;
306 }
307
308 static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
309                                    struct ffa_send_direct_data *data)
310 {
311         u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
312         ffa_value_t ret;
313
314         if (mode_32bit) {
315                 req_id = FFA_MSG_SEND_DIRECT_REQ;
316                 resp_id = FFA_MSG_SEND_DIRECT_RESP;
317         } else {
318                 req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
319                 resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
320         }
321
322         invoke_ffa_fn((ffa_value_t){
323                       .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
324                       .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
325                       .a6 = data->data3, .a7 = data->data4,
326                       }, &ret);
327
328         while (ret.a0 == FFA_INTERRUPT)
329                 invoke_ffa_fn((ffa_value_t){
330                               .a0 = FFA_RUN, .a1 = ret.a1,
331                               }, &ret);
332
333         if (ret.a0 == FFA_ERROR)
334                 return ffa_to_linux_errno((int)ret.a2);
335
336         if (ret.a0 == resp_id) {
337                 data->data0 = ret.a3;
338                 data->data1 = ret.a4;
339                 data->data2 = ret.a5;
340                 data->data3 = ret.a6;
341                 data->data4 = ret.a7;
342                 return 0;
343         }
344
345         return -EINVAL;
346 }
347
348 static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
349 {
350         u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
351         struct ffa_indirect_msg_hdr *msg;
352         ffa_value_t ret;
353         int retval = 0;
354
355         if (sz > (RXTX_BUFFER_SIZE - sizeof(*msg)))
356                 return -ERANGE;
357
358         mutex_lock(&drv_info->tx_lock);
359
360         msg = drv_info->tx_buffer;
361         msg->flags = 0;
362         msg->res0 = 0;
363         msg->offset = sizeof(*msg);
364         msg->send_recv_id = src_dst_ids;
365         msg->size = sz;
366         memcpy((u8 *)msg + msg->offset, buf, sz);
367
368         /* flags = 0, sender VMID = 0 works for both physical/virtual NS */
369         invoke_ffa_fn((ffa_value_t){
370                       .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0
371                       }, &ret);
372
373         if (ret.a0 == FFA_ERROR)
374                 retval = ffa_to_linux_errno((int)ret.a2);
375
376         mutex_unlock(&drv_info->tx_lock);
377         return retval;
378 }
379
380 static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
381                               u32 frag_len, u32 len, u64 *handle)
382 {
383         ffa_value_t ret;
384
385         invoke_ffa_fn((ffa_value_t){
386                       .a0 = func_id, .a1 = len, .a2 = frag_len,
387                       .a3 = buf, .a4 = buf_sz,
388                       }, &ret);
389
390         while (ret.a0 == FFA_MEM_OP_PAUSE)
391                 invoke_ffa_fn((ffa_value_t){
392                               .a0 = FFA_MEM_OP_RESUME,
393                               .a1 = ret.a1, .a2 = ret.a2,
394                               }, &ret);
395
396         if (ret.a0 == FFA_ERROR)
397                 return ffa_to_linux_errno((int)ret.a2);
398
399         if (ret.a0 == FFA_SUCCESS) {
400                 if (handle)
401                         *handle = PACK_HANDLE(ret.a2, ret.a3);
402         } else if (ret.a0 == FFA_MEM_FRAG_RX) {
403                 if (handle)
404                         *handle = PACK_HANDLE(ret.a1, ret.a2);
405         } else {
406                 return -EOPNOTSUPP;
407         }
408
409         return frag_len;
410 }
411
412 static int ffa_mem_next_frag(u64 handle, u32 frag_len)
413 {
414         ffa_value_t ret;
415
416         invoke_ffa_fn((ffa_value_t){
417                       .a0 = FFA_MEM_FRAG_TX,
418                       .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
419                       .a3 = frag_len,
420                       }, &ret);
421
422         while (ret.a0 == FFA_MEM_OP_PAUSE)
423                 invoke_ffa_fn((ffa_value_t){
424                               .a0 = FFA_MEM_OP_RESUME,
425                               .a1 = ret.a1, .a2 = ret.a2,
426                               }, &ret);
427
428         if (ret.a0 == FFA_ERROR)
429                 return ffa_to_linux_errno((int)ret.a2);
430
431         if (ret.a0 == FFA_MEM_FRAG_RX)
432                 return ret.a3;
433         else if (ret.a0 == FFA_SUCCESS)
434                 return 0;
435
436         return -EOPNOTSUPP;
437 }
438
439 static int
440 ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
441                       u32 len, u64 *handle, bool first)
442 {
443         if (!first)
444                 return ffa_mem_next_frag(*handle, frag_len);
445
446         return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
447 }
448
449 static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
450 {
451         u32 num_pages = 0;
452
453         do {
454                 num_pages += sg->length / FFA_PAGE_SIZE;
455         } while ((sg = sg_next(sg)));
456
457         return num_pages;
458 }
459
460 static u16 ffa_memory_attributes_get(u32 func_id)
461 {
462         /*
463          * For the memory lend or donate operation, if the receiver is a PE or
464          * a proxy endpoint, the owner/sender must not specify the attributes
465          */
466         if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
467             func_id == FFA_MEM_LEND)
468                 return 0;
469
470         return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
471 }
472
473 static int
474 ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
475                        struct ffa_mem_ops_args *args)
476 {
477         int rc = 0;
478         bool first = true;
479         u32 composite_offset;
480         phys_addr_t addr = 0;
481         struct ffa_mem_region *mem_region = buffer;
482         struct ffa_composite_mem_region *composite;
483         struct ffa_mem_region_addr_range *constituents;
484         struct ffa_mem_region_attributes *ep_mem_access;
485         u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);
486
487         mem_region->tag = args->tag;
488         mem_region->flags = args->flags;
489         mem_region->sender_id = drv_info->vm_id;
490         mem_region->attributes = ffa_memory_attributes_get(func_id);
491         ep_mem_access = buffer +
492                         ffa_mem_desc_offset(buffer, 0, drv_info->version);
493         composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
494                                                drv_info->version);
495
496         for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
497                 ep_mem_access->receiver = args->attrs[idx].receiver;
498                 ep_mem_access->attrs = args->attrs[idx].attrs;
499                 ep_mem_access->composite_off = composite_offset;
500                 ep_mem_access->flag = 0;
501                 ep_mem_access->reserved = 0;
502         }
503         mem_region->handle = 0;
504         mem_region->ep_count = args->nattrs;
505         if (drv_info->version <= FFA_VERSION_1_0) {
506                 mem_region->ep_mem_size = 0;
507         } else {
508                 mem_region->ep_mem_size = sizeof(*ep_mem_access);
509                 mem_region->ep_mem_offset = sizeof(*mem_region);
510                 memset(mem_region->reserved, 0, 12);
511         }
512
513         composite = buffer + composite_offset;
514         composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
515         composite->addr_range_cnt = num_entries;
516         composite->reserved = 0;
517
518         length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
519         frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
520         if (frag_len > max_fragsize)
521                 return -ENXIO;
522
523         if (!args->use_txbuf) {
524                 addr = virt_to_phys(buffer);
525                 buf_sz = max_fragsize / FFA_PAGE_SIZE;
526         }
527
528         constituents = buffer + frag_len;
529         idx = 0;
530         do {
531                 if (frag_len == max_fragsize) {
532                         rc = ffa_transmit_fragment(func_id, addr, buf_sz,
533                                                    frag_len, length,
534                                                    &args->g_handle, first);
535                         if (rc < 0)
536                                 return -ENXIO;
537
538                         first = false;
539                         idx = 0;
540                         frag_len = 0;
541                         constituents = buffer;
542                 }
543
544                 if ((void *)constituents - buffer > max_fragsize) {
545                         pr_err("Memory Region Fragment > Tx Buffer size\n");
546                         return -EFAULT;
547                 }
548
549                 constituents->address = sg_phys(args->sg);
550                 constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
551                 constituents->reserved = 0;
552                 constituents++;
553                 frag_len += sizeof(struct ffa_mem_region_addr_range);
554         } while ((args->sg = sg_next(args->sg)));
555
556         return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
557                                      length, &args->g_handle, first);
558 }
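/*
 * Fragmentation arithmetic, as a rough illustration: each constituent
 * entry adds sizeof(struct ffa_mem_region_addr_range) (16 bytes) to
 * frag_len, so with a 4K TX buffer and an illustrative header/endpoint/
 * composite preamble of, say, 80 bytes, roughly (4096 - 80) / 16 = 251
 * address ranges fit in the first fragment; any remainder is streamed
 * via FFA_MEM_FRAG_TX in ffa_mem_next_frag().
 */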
559
560 static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
561 {
562         int ret;
563         void *buffer;
564
565         if (!args->use_txbuf) {
566                 buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
567                 if (!buffer)
568                         return -ENOMEM;
569         } else {
570                 buffer = drv_info->tx_buffer;
571                 mutex_lock(&drv_info->tx_lock);
572         }
573
574         ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);
575
576         if (args->use_txbuf)
577                 mutex_unlock(&drv_info->tx_lock);
578         else
579                 free_pages_exact(buffer, RXTX_BUFFER_SIZE);
580
581         return ret < 0 ? ret : 0;
582 }
583
584 static int ffa_memory_reclaim(u64 g_handle, u32 flags)
585 {
586         ffa_value_t ret;
587
588         invoke_ffa_fn((ffa_value_t){
589                       .a0 = FFA_MEM_RECLAIM,
590                       .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
591                       .a3 = flags,
592                       }, &ret);
593
594         if (ret.a0 == FFA_ERROR)
595                 return ffa_to_linux_errno((int)ret.a2);
596
597         return 0;
598 }
599
600 static int ffa_features(u32 func_feat_id, u32 input_props,
601                         u32 *if_props_1, u32 *if_props_2)
602 {
603         ffa_value_t id;
604
605         if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
606                 pr_err("%s: Invalid Parameters: %x, %x", __func__,
607                        func_feat_id, input_props);
608                 return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
609         }
610
611         invoke_ffa_fn((ffa_value_t){
612                 .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
613                 }, &id);
614
615         if (id.a0 == FFA_ERROR)
616                 return ffa_to_linux_errno((int)id.a2);
617
618         if (if_props_1)
619                 *if_props_1 = id.a2;
620         if (if_props_2)
621                 *if_props_2 = id.a3;
622
623         return 0;
624 }
625
626 static int ffa_notification_bitmap_create(void)
627 {
628         ffa_value_t ret;
629         u16 vcpu_count = nr_cpu_ids;
630
631         invoke_ffa_fn((ffa_value_t){
632                       .a0 = FFA_NOTIFICATION_BITMAP_CREATE,
633                       .a1 = drv_info->vm_id, .a2 = vcpu_count,
634                       }, &ret);
635
636         if (ret.a0 == FFA_ERROR)
637                 return ffa_to_linux_errno((int)ret.a2);
638
639         return 0;
640 }
641
642 static int ffa_notification_bitmap_destroy(void)
643 {
644         ffa_value_t ret;
645
646         invoke_ffa_fn((ffa_value_t){
647                       .a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
648                       .a1 = drv_info->vm_id,
649                       }, &ret);
650
651         if (ret.a0 == FFA_ERROR)
652                 return ffa_to_linux_errno((int)ret.a2);
653
654         return 0;
655 }
656
657 #define NOTIFICATION_LOW_MASK           GENMASK(31, 0)
658 #define NOTIFICATION_HIGH_MASK          GENMASK(63, 32)
659 #define NOTIFICATION_BITMAP_HIGH(x)     \
660                 ((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
661 #define NOTIFICATION_BITMAP_LOW(x)      \
662                 ((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
663 #define PACK_NOTIFICATION_BITMAP(low, high)     \
664         (FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) | \
665          FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))
666
667 #define RECEIVER_VCPU_MASK              GENMASK(31, 16)
668 #define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r) \
669         (FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) | \
670          FIELD_PREP(RECEIVER_ID_MASK, (r)))
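/*
 * For example, PACK_NOTIFICATION_GET_RECEIVER_INFO(2, 0x0001) yields
 * 0x00020001: vCPU 2 of endpoint 0x0001. Similarly,
 * PACK_NOTIFICATION_BITMAP(0x1, 0x0) reassembles the 64-bit bitmap
 * BIT(0) from the two 32-bit halves returned in separate registers.
 */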
671
672 #define NOTIFICATION_INFO_GET_MORE_PEND_MASK    BIT(0)
673 #define NOTIFICATION_INFO_GET_ID_COUNT          GENMASK(11, 7)
674 #define ID_LIST_MASK_64                         GENMASK(51, 12)
675 #define ID_LIST_MASK_32                         GENMASK(31, 12)
676 #define MAX_IDS_64                              20
677 #define MAX_IDS_32                              10
678
679 #define PER_VCPU_NOTIFICATION_FLAG              BIT(0)
680 #define SECURE_PARTITION_BITMAP                 BIT(0)
681 #define NON_SECURE_VM_BITMAP                    BIT(1)
682 #define SPM_FRAMEWORK_BITMAP                    BIT(2)
683 #define NS_HYP_FRAMEWORK_BITMAP                 BIT(3)
684
685 static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
686                                         u32 flags, bool is_bind)
687 {
688         ffa_value_t ret;
689         u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);
690
691         func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;
692
693         invoke_ffa_fn((ffa_value_t){
694                   .a0 = func, .a1 = src_dst_ids, .a2 = flags,
695                   .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
696                   .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
697                   }, &ret);
698
699         if (ret.a0 == FFA_ERROR)
700                 return ffa_to_linux_errno((int)ret.a2);
701         else if (ret.a0 != FFA_SUCCESS)
702                 return -EINVAL;
703
704         return 0;
705 }
706
707 static
708 int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
709 {
710         ffa_value_t ret;
711         u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);
712
713         invoke_ffa_fn((ffa_value_t) {
714                   .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
715                   .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
716                   .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
717                   }, &ret);
718
719         if (ret.a0 == FFA_ERROR)
720                 return ffa_to_linux_errno((int)ret.a2);
721         else if (ret.a0 != FFA_SUCCESS)
722                 return -EINVAL;
723
724         return 0;
725 }
726
727 struct ffa_notify_bitmaps {
728         u64 sp_map;
729         u64 vm_map;
730         u64 arch_map;
731 };
732
733 static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
734 {
735         ffa_value_t ret;
736         u16 src_id = drv_info->vm_id;
737         u16 cpu_id = smp_processor_id();
738         u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id);
739
740         invoke_ffa_fn((ffa_value_t){
741                   .a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags,
742                   }, &ret);
743
744         if (ret.a0 == FFA_ERROR)
745                 return ffa_to_linux_errno((int)ret.a2);
746         else if (ret.a0 != FFA_SUCCESS)
747                 return -EINVAL; /* Something else went wrong. */
748
749         notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
750         notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
751         notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);
752
753         return 0;
754 }
755
756 struct ffa_dev_part_info {
757         ffa_sched_recv_cb callback;
758         void *cb_data;
759         rwlock_t rw_lock;
760 };
761
762 static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
763 {
764         struct ffa_dev_part_info *partition;
765         ffa_sched_recv_cb callback;
766         void *cb_data;
767
768         partition = xa_load(&drv_info->partition_info, part_id);
769         if (!partition) {
770                 pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
771                 return;
772         }
773
774         read_lock(&partition->rw_lock);
775         callback = partition->callback;
776         cb_data = partition->cb_data;
777         read_unlock(&partition->rw_lock);
778
779         if (callback)
780                 callback(vcpu, is_per_vcpu, cb_data);
781 }
782
783 static void ffa_notification_info_get(void)
784 {
785         int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
786         bool is_64b_resp;
787         ffa_value_t ret;
788         u64 id_list;
789
790         do {
791                 invoke_ffa_fn((ffa_value_t){
792                           .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
793                           }, &ret);
794
795                 if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
796                         if (ret.a2 != FFA_RET_NO_DATA)
797                                 pr_err("Notification Info fetch failed: 0x%lx (0x%lx)\n",
798                                        ret.a0, ret.a2);
799                         return;
800                 }
801
802                 is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);
803
804                 ids_processed = 0;
805                 lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
806                 if (is_64b_resp) {
807                         max_ids = MAX_IDS_64;
808                         id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
809                 } else {
810                         max_ids = MAX_IDS_32;
811                         id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
812                 }
813
814                 for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
815                         ids_count[idx] = (id_list & 0x3) + 1;
816
817                 /* Process IDs */
818                 for (list = 0; list < lists_cnt; list++) {
819                         u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;
820
821                         if (ids_processed >= max_ids - 1)
822                                 break;
823
824                         part_id = packed_id_list[ids_processed++];
825
826                         if (ids_count[list] == 1) { /* Global Notification */
827                                 __do_sched_recv_cb(part_id, 0, false);
828                                 continue;
829                         }
830
831                         /* Per vCPU Notification */
832                         for (idx = 0; idx < ids_count[list]; idx++) {
833                                 if (ids_processed >= max_ids - 1)
834                                         break;
835
836                                 vcpu_id = packed_id_list[ids_processed++];
837
838                                 __do_sched_recv_cb(part_id, vcpu_id, true);
839                         }
840                 }
841         } while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
842 }
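/*
 * Decoding example: a response with ret.a2 == 0x4100 carries two lists
 * (bits[11:7] == 2). The 2-bit count fields starting at bit 12 read
 * 0b00 and 0b01, i.e. one ID (a global notification for a partition)
 * followed by two IDs (a partition ID plus one vCPU ID for a per-vCPU
 * notification). The 16-bit IDs themselves are unpacked in order from
 * ret.a3 onwards.
 */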
843
844 static int ffa_run(struct ffa_device *dev, u16 vcpu)
845 {
846         ffa_value_t ret;
847         u32 target = dev->vm_id << 16 | vcpu;
848
849         invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);
850
851         while (ret.a0 == FFA_INTERRUPT)
852                 invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
853                               &ret);
854
855         if (ret.a0 == FFA_ERROR)
856                 return ffa_to_linux_errno((int)ret.a2);
857
858         return 0;
859 }
860
861 static void ffa_set_up_mem_ops_native_flag(void)
862 {
863         if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
864             !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
865                 drv_info->mem_ops_native = true;
866 }
867
868 static u32 ffa_api_version_get(void)
869 {
870         return drv_info->version;
871 }
872
873 static int ffa_partition_info_get(const char *uuid_str,
874                                   struct ffa_partition_info *buffer)
875 {
876         int count;
877         uuid_t uuid;
878         struct ffa_partition_info *pbuf;
879
880         if (uuid_parse(uuid_str, &uuid)) {
881                 pr_err("invalid uuid (%s)\n", uuid_str);
882                 return -ENODEV;
883         }
884
885         count = ffa_partition_probe(&uuid, &pbuf);
886         if (count <= 0)
887                 return -ENOENT;
888
889         memcpy(buffer, pbuf, sizeof(*pbuf) * count);
890         kfree(pbuf);
891         return 0;
892 }
893
894 static void ffa_mode_32bit_set(struct ffa_device *dev)
895 {
896         dev->mode_32bit = true;
897 }
898
899 static int ffa_sync_send_receive(struct ffa_device *dev,
900                                  struct ffa_send_direct_data *data)
901 {
902         return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
903                                        dev->mode_32bit, data);
904 }
905
906 static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
907 {
908         return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
909 }
910
911 static int ffa_memory_share(struct ffa_mem_ops_args *args)
912 {
913         if (drv_info->mem_ops_native)
914                 return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
915
916         return ffa_memory_ops(FFA_MEM_SHARE, args);
917 }
918
919 static int ffa_memory_lend(struct ffa_mem_ops_args *args)
920 {
921         /* Note that upon a successful MEM_LEND request the caller
922          * must ensure that the memory region specified is not accessed
923          * until a successful MEM_RECLAIM call has been made.
924          * On systems with a hypervisor present this will be enforced,
925          * however on systems without a hypervisor the responsibility
926          * falls to the calling kernel driver to prevent access.
927          */
928         if (drv_info->mem_ops_native)
929                 return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
930
931         return ffa_memory_ops(FFA_MEM_LEND, args);
932 }
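#if 0
/*
 * A minimal usage sketch (not part of this driver), assuming a client
 * holding a struct ffa_device and one page to lend: lend the page, then
 * reclaim it once the receiver is done. FFA_MEM_RW comes from
 * include/linux/arm_ffa.h; the function name is a placeholder.
 */
static int example_lend_and_reclaim(struct ffa_device *ffa_dev,
				    struct page *page)
{
	struct scatterlist sg;
	struct ffa_mem_region_attributes mem_attr = {
		.receiver = ffa_dev->vm_id,
		.attrs = FFA_MEM_RW,
	};
	struct ffa_mem_ops_args args = {
		.use_txbuf = true,
		.attrs = &mem_attr,
		.nattrs = 1,
		.sg = &sg,
	};
	int rc;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, FFA_PAGE_SIZE, 0);

	rc = ffa_dev->ops->mem_ops->memory_lend(&args);
	if (rc)
		return rc;

	/* The region must not be touched until reclaim succeeds */
	return ffa_dev->ops->mem_ops->memory_reclaim(args.g_handle, 0);
}
#endif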
933
934 #define FFA_SECURE_PARTITION_ID_FLAG    BIT(15)
935
936 #define ffa_notifications_disabled()    (!drv_info->notif_enabled)
937
938 enum notify_type {
939         NON_SECURE_VM,
940         SECURE_PARTITION,
941         FRAMEWORK,
942 };
943
944 struct notifier_cb_info {
945         struct hlist_node hnode;
946         ffa_notifier_cb cb;
947         void *cb_data;
948         enum notify_type type;
949 };
950
951 static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
952                                     void *cb_data, bool is_registration)
953 {
954         struct ffa_dev_part_info *partition;
955         bool cb_valid;
956
957         if (ffa_notifications_disabled())
958                 return -EOPNOTSUPP;
959
960         partition = xa_load(&drv_info->partition_info, part_id);
961         if (!partition) {
962                 pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
963                 return -EINVAL;
964         }
965
966         write_lock(&partition->rw_lock);
967
968         cb_valid = !!partition->callback;
969         if (!(is_registration ^ cb_valid)) {
970                 write_unlock(&partition->rw_lock);
971                 return -EINVAL;
972         }
973
974         partition->callback = callback;
975         partition->cb_data = cb_data;
976
977         write_unlock(&partition->rw_lock);
978         return 0;
979 }
980
981 static int ffa_sched_recv_cb_register(struct ffa_device *dev,
982                                       ffa_sched_recv_cb cb, void *cb_data)
983 {
984         return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
985 }
986
987 static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
988 {
989         return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
990 }
991
992 static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
993 {
994         return ffa_notification_bind_common(dst_id, bitmap, flags, true);
995 }
996
997 static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
998 {
999         return ffa_notification_bind_common(dst_id, bitmap, 0, false);
1000 }
1001
1002 /* Should be called while the notify_lock is taken */
1003 static struct notifier_cb_info *
1004 notifier_hash_node_get(u16 notify_id, enum notify_type type)
1005 {
1006         struct notifier_cb_info *node;
1007
1008         hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
1009                 if (type == node->type)
1010                         return node;
1011
1012         return NULL;
1013 }
1014
1015 static int
1016 update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
1017                    void *cb_data, bool is_registration)
1018 {
1019         struct notifier_cb_info *cb_info = NULL;
1020         bool cb_found;
1021
1022         cb_info = notifier_hash_node_get(notify_id, type);
1023         cb_found = !!cb_info;
1024
1025         if (!(is_registration ^ cb_found))
1026                 return -EINVAL;
1027
1028         if (is_registration) {
1029                 cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
1030                 if (!cb_info)
1031                         return -ENOMEM;
1032
1033                 cb_info->type = type;
1034                 cb_info->cb = cb;
1035                 cb_info->cb_data = cb_data;
1036
1037                 hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
1038         } else {
1039                 hash_del(&cb_info->hnode);
1040         }
1041
1042         return 0;
1043 }
1044
1045 static enum notify_type ffa_notify_type_get(u16 vm_id)
1046 {
1047         if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
1048                 return SECURE_PARTITION;
1049         else
1050                 return NON_SECURE_VM;
1051 }
1052
1053 static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
1054 {
1055         int rc;
1056         enum notify_type type = ffa_notify_type_get(dev->vm_id);
1057
1058         if (ffa_notifications_disabled())
1059                 return -EOPNOTSUPP;
1060
1061         if (notify_id >= FFA_MAX_NOTIFICATIONS)
1062                 return -EINVAL;
1063
1064         mutex_lock(&drv_info->notify_lock);
1065
1066         rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
1067         if (rc) {
1068                 pr_err("Could not unregister notification callback\n");
1069                 mutex_unlock(&drv_info->notify_lock);
1070                 return rc;
1071         }
1072
1073         rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
1074
1075         mutex_unlock(&drv_info->notify_lock);
1076
1077         return rc;
1078 }
1079
1080 static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
1081                               ffa_notifier_cb cb, void *cb_data, int notify_id)
1082 {
1083         int rc;
1084         u32 flags = 0;
1085         enum notify_type type = ffa_notify_type_get(dev->vm_id);
1086
1087         if (ffa_notifications_disabled())
1088                 return -EOPNOTSUPP;
1089
1090         if (notify_id >= FFA_MAX_NOTIFICATIONS)
1091                 return -EINVAL;
1092
1093         mutex_lock(&drv_info->notify_lock);
1094
1095         if (is_per_vcpu)
1096                 flags = PER_VCPU_NOTIFICATION_FLAG;
1097
1098         rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
1099         if (rc) {
1100                 mutex_unlock(&drv_info->notify_lock);
1101                 return rc;
1102         }
1103
1104         rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
1105         if (rc) {
1106                 pr_err("Failed to register callback for %d - %d\n",
1107                        notify_id, rc);
1108                 ffa_notification_unbind(dev->vm_id, BIT(notify_id));
1109         }
1110         mutex_unlock(&drv_info->notify_lock);
1111
1112         return rc;
1113 }
1114
1115 static int ffa_notify_send(struct ffa_device *dev, int notify_id,
1116                            bool is_per_vcpu, u16 vcpu)
1117 {
1118         u32 flags = 0;
1119
1120         if (ffa_notifications_disabled())
1121                 return -EOPNOTSUPP;
1122
1123         if (is_per_vcpu)
1124                 flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);
1125
1126         return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
1127                                     BIT(notify_id));
1128 }
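/*
 * For example, sending per-vCPU notification 5 to vCPU 3 uses
 * flags == (PER_VCPU_NOTIFICATION_FLAG | 3 << 16) == 0x00030001
 * and bitmap BIT(5).
 */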
1129
1130 static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
1131 {
1132         int notify_id;
1133         struct notifier_cb_info *cb_info = NULL;
1134
1135         for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
1136              notify_id++, bitmap >>= 1) {
1137                 if (!(bitmap & 1))
1138                         continue;
1139
1140                 mutex_lock(&drv_info->notify_lock);
1141                 cb_info = notifier_hash_node_get(notify_id, type);
1142                 mutex_unlock(&drv_info->notify_lock);
1143
1144                 if (cb_info && cb_info->cb)
1145                         cb_info->cb(notify_id, cb_info->cb_data);
1146         }
1147 }
1148
1149 static void notif_get_and_handle(void *unused)
1150 {
1151         int rc;
1152         struct ffa_notify_bitmaps bitmaps;
1153
1154         rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
1155                                   SPM_FRAMEWORK_BITMAP, &bitmaps);
1156         if (rc) {
1157                 pr_err("Failed to retrieve notifications with %d!\n", rc);
1158                 return;
1159         }
1160
1161         handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
1162         handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
1163         handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
1164 }
1165
1166 static void
1167 ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
1168 {
1169         struct ffa_drv_info *info = cb_data;
1170
1171         if (!is_per_vcpu)
1172                 notif_get_and_handle(info);
1173         else
1174                 smp_call_function_single(vcpu, notif_get_and_handle, info, 0);
1175 }
1176
1177 static void notif_pcpu_irq_work_fn(struct work_struct *work)
1178 {
1179         struct ffa_drv_info *info = container_of(work, struct ffa_drv_info,
1180                                                  notif_pcpu_work);
1181
1182         ffa_self_notif_handle(smp_processor_id(), true, info);
1183 }
1184
1185 static const struct ffa_info_ops ffa_drv_info_ops = {
1186         .api_version_get = ffa_api_version_get,
1187         .partition_info_get = ffa_partition_info_get,
1188 };
1189
1190 static const struct ffa_msg_ops ffa_drv_msg_ops = {
1191         .mode_32bit_set = ffa_mode_32bit_set,
1192         .sync_send_receive = ffa_sync_send_receive,
1193         .indirect_send = ffa_indirect_msg_send,
1194 };
1195
1196 static const struct ffa_mem_ops ffa_drv_mem_ops = {
1197         .memory_reclaim = ffa_memory_reclaim,
1198         .memory_share = ffa_memory_share,
1199         .memory_lend = ffa_memory_lend,
1200 };
1201
1202 static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
1203         .run = ffa_run,
1204 };
1205
1206 static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
1207         .sched_recv_cb_register = ffa_sched_recv_cb_register,
1208         .sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
1209         .notify_request = ffa_notify_request,
1210         .notify_relinquish = ffa_notify_relinquish,
1211         .notify_send = ffa_notify_send,
1212 };
1213
1214 static const struct ffa_ops ffa_drv_ops = {
1215         .info_ops = &ffa_drv_info_ops,
1216         .msg_ops = &ffa_drv_msg_ops,
1217         .mem_ops = &ffa_drv_mem_ops,
1218         .cpu_ops = &ffa_drv_cpu_ops,
1219         .notifier_ops = &ffa_drv_notifier_ops,
1220 };
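#if 0
/*
 * A minimal client sketch (not part of this driver) showing how a
 * driver bound on the FF-A bus consumes the ops published above. The
 * UUID and names below are made-up placeholders, not a real service.
 */
static int example_ffa_probe(struct ffa_device *ffa_dev)
{
	struct ffa_send_direct_data data = { .data0 = 0x1 };
	int rc;

	rc = ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
	if (!rc)
		pr_info("example: response data0 = 0x%lx\n", data.data0);

	return rc;
}

static const struct ffa_device_id example_device_id[] = {
	{ UUID_INIT(0x12345678, 0x9abc, 0xdef0,
		    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef) },
	{}
};

static struct ffa_driver example_ffa_driver = {
	.name = "example-ffa",
	.probe = example_ffa_probe,
	.id_table = example_device_id,
};
#endif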
1221
1222 void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
1223 {
1224         int count, idx;
1225         struct ffa_partition_info *pbuf, *tpbuf;
1226
1227         count = ffa_partition_probe(uuid, &pbuf);
1228         if (count <= 0)
1229                 return;
1230
1231         for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
1232                 if (tpbuf->id == ffa_dev->vm_id)
1233                         uuid_copy(&ffa_dev->uuid, uuid);
1234         kfree(pbuf);
1235 }
1236
1237 static int
1238 ffa_bus_notifier(struct notifier_block *nb, unsigned long action, void *data)
1239 {
1240         struct device *dev = data;
1241         struct ffa_device *fdev = to_ffa_dev(dev);
1242
1243         if (action == BUS_NOTIFY_BIND_DRIVER) {
1244                 struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
1245                 const struct ffa_device_id *id_table = ffa_drv->id_table;
1246
1247                 /*
1248                  * FF-A v1.1 provides the UUID for each partition as part of
1249                  * the discovery API; the discovered UUID is populated in the
1250                  * device's UUID, so there is no need to work around that by
1251                  * copying the same from the driver table.
1252                  */
1253                 if (uuid_is_null(&fdev->uuid))
1254                         ffa_device_match_uuid(fdev, &id_table->uuid);
1255
1256                 return NOTIFY_OK;
1257         }
1258
1259         return NOTIFY_DONE;
1260 }
1261
1262 static struct notifier_block ffa_bus_nb = {
1263         .notifier_call = ffa_bus_notifier,
1264 };
1265
1266 static int ffa_setup_partitions(void)
1267 {
1268         int count, idx, ret;
1269         uuid_t uuid;
1270         struct ffa_device *ffa_dev;
1271         struct ffa_dev_part_info *info;
1272         struct ffa_partition_info *pbuf, *tpbuf;
1273
1274         if (drv_info->version == FFA_VERSION_1_0) {
1275                 ret = bus_register_notifier(&ffa_bus_type, &ffa_bus_nb);
1276                 if (ret)
1277                         pr_err("Failed to register FF-A bus notifiers\n");
1278         }
1279
1280         count = ffa_partition_probe(&uuid_null, &pbuf);
1281         if (count <= 0) {
1282                 pr_info("%s: No partitions found, error %d\n", __func__, count);
1283                 return -EINVAL;
1284         }
1285
1286         xa_init(&drv_info->partition_info);
1287         for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
1288                 import_uuid(&uuid, (u8 *)tpbuf->uuid);
1289
1290                 /* Note that if the UUID is uuid_null, ffa_bus_notifier()
1291                  * will need to find the UUID of this partition ID with the
1292                  * help of ffa_device_match_uuid(). FF-A v1.1 and above
1293                  * provides the UUID here for each partition as part of the
1294                  * discovery API and the same is passed.
1295                  */
1296                 ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
1297                 if (!ffa_dev) {
1298                         pr_err("%s: failed to register partition ID 0x%x\n",
1299                                __func__, tpbuf->id);
1300                         continue;
1301                 }
1302
1303                 ffa_dev->properties = tpbuf->properties;
1304
1305                 if (drv_info->version > FFA_VERSION_1_0 &&
1306                     !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
1307                         ffa_mode_32bit_set(ffa_dev);
1308
1309                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1310                 if (!info) {
1311                         ffa_device_unregister(ffa_dev);
1312                         continue;
1313                 }
1314                 rwlock_init(&info->rw_lock);
1315                 ret = xa_insert(&drv_info->partition_info, tpbuf->id,
1316                                 info, GFP_KERNEL);
1317                 if (ret) {
1318                         pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
1319                                __func__, tpbuf->id, ret);
1320                         ffa_device_unregister(ffa_dev);
1321                         kfree(info);
1322                 }
1323         }
1324
1325         kfree(pbuf);
1326
1327         /* Allocate for the host */
1328         info = kzalloc(sizeof(*info), GFP_KERNEL);
1329         if (!info) {
1330                 pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n",
1331                        __func__, drv_info->vm_id);
1332                 /* Already registered devices are freed on bus_exit */
1333                 ffa_partitions_cleanup();
1334                 return -ENOMEM;
1335         }
1336
1337         rwlock_init(&info->rw_lock);
1338         ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
1339                         info, GFP_KERNEL);
1340         if (ret) {
1341                 pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
1342                        __func__, drv_info->vm_id, ret);
1343                 kfree(info);
1344                 /* Already registered devices are freed on bus_exit */
1345                 ffa_partitions_cleanup();
1346         }
1347
1348         return ret;
1349 }
1350
1351 static void ffa_partitions_cleanup(void)
1352 {
1353         struct ffa_dev_part_info *info;
1354         unsigned long idx;
1355
1356         xa_for_each(&drv_info->partition_info, idx, info) {
1357                 xa_erase(&drv_info->partition_info, idx);
1358                 kfree(info);
1359         }
1360
1361         xa_destroy(&drv_info->partition_info);
1362 }
1363
1364 /* FFA FEATURE IDs */
1365 #define FFA_FEAT_NOTIFICATION_PENDING_INT       (1)
1366 #define FFA_FEAT_SCHEDULE_RECEIVER_INT          (2)
1367 #define FFA_FEAT_MANAGED_EXIT_INT               (3)
1368
1369 static irqreturn_t ffa_sched_recv_irq_handler(int irq, void *irq_data)
1370 {
1371         struct ffa_pcpu_irq *pcpu = irq_data;
1372         struct ffa_drv_info *info = pcpu->info;
1373
1374         queue_work(info->notif_pcpu_wq, &info->sched_recv_irq_work);
1375
1376         return IRQ_HANDLED;
1377 }
1378
1379 static irqreturn_t notif_pend_irq_handler(int irq, void *irq_data)
1380 {
1381         struct ffa_pcpu_irq *pcpu = irq_data;
1382         struct ffa_drv_info *info = pcpu->info;
1383
1384         queue_work_on(smp_processor_id(), info->notif_pcpu_wq,
1385                       &info->notif_pcpu_work);
1386
1387         return IRQ_HANDLED;
1388 }
1389
1390 static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
1391 {
1392         ffa_notification_info_get();
1393 }
1394
1395 static int ffa_irq_map(u32 id)
1396 {
1397         char *err_str;
1398         int ret, irq, intid;
1399
1400         if (id == FFA_FEAT_NOTIFICATION_PENDING_INT)
1401                 err_str = "Notification Pending Interrupt";
1402         else if (id == FFA_FEAT_SCHEDULE_RECEIVER_INT)
1403                 err_str = "Schedule Receiver Interrupt";
1404         else
1405                 err_str = "Unknown ID";
1406
1407         /* The returned intid is assumed to be an SGI donated to the NS world */
1408         ret = ffa_features(id, 0, &intid, NULL);
1409         if (ret < 0) {
1410                 if (ret != -EOPNOTSUPP)
1411                         pr_err("Failed to retrieve FF-A %s %u\n", err_str, id);
1412                 return ret;
1413         }
1414
1415         if (acpi_disabled) {
1416                 struct of_phandle_args oirq = {};
1417                 struct device_node *gic;
1418
1419                 /* Only GICv3 supported currently with the device tree */
1420                 gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
1421                 if (!gic)
1422                         return -ENXIO;
1423
1424                 oirq.np = gic;
1425                 oirq.args_count = 1;
1426                 oirq.args[0] = intid;
1427                 irq = irq_create_of_mapping(&oirq);
1428                 of_node_put(gic);
1429 #ifdef CONFIG_ACPI
1430         } else {
1431                 irq = acpi_register_gsi(NULL, intid, ACPI_EDGE_SENSITIVE,
1432                                         ACPI_ACTIVE_HIGH);
1433 #endif
1434         }
1435
1436         if (irq <= 0) {
1437                 pr_err("Failed to create IRQ mapping!\n");
1438                 return -ENODATA;
1439         }
1440
1441         return irq;
1442 }
1443
1444 static void ffa_irq_unmap(unsigned int irq)
1445 {
1446         if (!irq)
1447                 return;
1448         irq_dispose_mapping(irq);
1449 }
1450
1451 static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
1452 {
1453         if (drv_info->sched_recv_irq)
1454                 enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
1455         if (drv_info->notif_pend_irq)
1456                 enable_percpu_irq(drv_info->notif_pend_irq, IRQ_TYPE_NONE);
1457         return 0;
1458 }
1459
1460 static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
1461 {
1462         if (drv_info->sched_recv_irq)
1463                 disable_percpu_irq(drv_info->sched_recv_irq);
1464         if (drv_info->notif_pend_irq)
1465                 disable_percpu_irq(drv_info->notif_pend_irq);
1466         return 0;
1467 }
1468
1469 static void ffa_uninit_pcpu_irq(void)
1470 {
1471         if (drv_info->cpuhp_state) {
1472                 cpuhp_remove_state(drv_info->cpuhp_state);
1473                 drv_info->cpuhp_state = 0;
1474         }
1475
1476         if (drv_info->notif_pcpu_wq) {
1477                 destroy_workqueue(drv_info->notif_pcpu_wq);
1478                 drv_info->notif_pcpu_wq = NULL;
1479         }
1480
1481         if (drv_info->sched_recv_irq)
1482                 free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);
1483
1484         if (drv_info->notif_pend_irq)
1485                 free_percpu_irq(drv_info->notif_pend_irq, drv_info->irq_pcpu);
1486
1487         if (drv_info->irq_pcpu) {
1488                 free_percpu(drv_info->irq_pcpu);
1489                 drv_info->irq_pcpu = NULL;
1490         }
1491 }
1492
1493 static int ffa_init_pcpu_irq(void)
1494 {
1495         struct ffa_pcpu_irq __percpu *irq_pcpu;
1496         int ret, cpu;
1497
1498         irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
1499         if (!irq_pcpu)
1500                 return -ENOMEM;
1501
1502         for_each_present_cpu(cpu)
1503                 per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;
1504
1505         drv_info->irq_pcpu = irq_pcpu;
1506
1507         if (drv_info->sched_recv_irq) {
1508                 ret = request_percpu_irq(drv_info->sched_recv_irq,
1509                                          ffa_sched_recv_irq_handler,
1510                                          "ARM-FFA-SRI", irq_pcpu);
1511                 if (ret) {
1512                         pr_err("Error registering percpu SRI nIRQ %d : %d\n",
1513                                drv_info->sched_recv_irq, ret);
1514                         drv_info->sched_recv_irq = 0;
1515                         return ret;
1516                 }
1517         }
1518
1519         if (drv_info->notif_pend_irq) {
1520                 ret = request_percpu_irq(drv_info->notif_pend_irq,
1521                                          notif_pend_irq_handler,
1522                                          "ARM-FFA-NPI", irq_pcpu);
1523                 if (ret) {
1524                         pr_err("Error registering percpu NPI nIRQ %d : %d\n",
1525                                drv_info->notif_pend_irq, ret);
1526                         drv_info->notif_pend_irq = 0;
1527                         return ret;
1528                 }
1529         }
1530
1531         INIT_WORK(&drv_info->sched_recv_irq_work, ffa_sched_recv_irq_work_fn);
1532         INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
1533         drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
1534         if (!drv_info->notif_pcpu_wq)
1535                 return -EINVAL;
1536
1537         ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
1538                                 ffa_cpuhp_pcpu_irq_enable,
1539                                 ffa_cpuhp_pcpu_irq_disable);
1540
1541         if (ret < 0)
1542                 return ret;
1543
1544         drv_info->cpuhp_state = ret;
1545         return 0;
1546 }
1547
1548 static void ffa_notifications_cleanup(void)
1549 {
1550         ffa_uninit_pcpu_irq();
1551         ffa_irq_unmap(drv_info->sched_recv_irq);
1552         drv_info->sched_recv_irq = 0;
1553         ffa_irq_unmap(drv_info->notif_pend_irq);
1554         drv_info->notif_pend_irq = 0;
1555
1556         if (drv_info->bitmap_created) {
1557                 ffa_notification_bitmap_destroy();
1558                 drv_info->bitmap_created = false;
1559         }
1560         drv_info->notif_enabled = false;
1561 }
1562
1563 static void ffa_notifications_setup(void)
1564 {
1565         int ret;
1566
1567         ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
1568         if (!ret) {
1569                 ret = ffa_notification_bitmap_create();
1570                 if (ret) {
1571                         pr_err("Notification bitmap create error %d\n", ret);
1572                         return;
1573                 }
1574
1575                 drv_info->bitmap_created = true;
1576         }
1577
1578         ret = ffa_irq_map(FFA_FEAT_SCHEDULE_RECEIVER_INT);
1579         if (ret > 0)
1580                 drv_info->sched_recv_irq = ret;
1581
1582         ret = ffa_irq_map(FFA_FEAT_NOTIFICATION_PENDING_INT);
1583         if (ret > 0)
1584                 drv_info->notif_pend_irq = ret;
1585
1586         if (!drv_info->sched_recv_irq && !drv_info->notif_pend_irq)
1587                 goto cleanup;
1588
1589         ret = ffa_init_pcpu_irq();
1590         if (ret)
1591                 goto cleanup;
1592
1593         hash_init(drv_info->notifier_hash);
1594         mutex_init(&drv_info->notify_lock);
1595
1596         drv_info->notif_enabled = true;
1597         return;
1598 cleanup:
1599         pr_info("Notification setup failed %d, not enabled\n", ret);
1600         ffa_notifications_cleanup();
1601 }
1602
1603 static int __init ffa_init(void)
1604 {
1605         int ret;
1606
1607         ret = ffa_transport_init(&invoke_ffa_fn);
1608         if (ret)
1609                 return ret;
1610
1611         drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
1612         if (!drv_info)
1613                 return -ENOMEM;
1615
1616         ret = ffa_version_check(&drv_info->version);
1617         if (ret)
1618                 goto free_drv_info;
1619
1620         if (ffa_id_get(&drv_info->vm_id)) {
1621                 pr_err("failed to obtain VM id for self\n");
1622                 ret = -ENODEV;
1623                 goto free_drv_info;
1624         }
1625
1626         drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
1627         if (!drv_info->rx_buffer) {
1628                 ret = -ENOMEM;
1629                 goto free_drv_info;
1630         }
1631
1632         drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
1633         if (!drv_info->tx_buffer) {
1634                 ret = -ENOMEM;
1635                 goto free_pages;
1636         }
1637
1638         ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
1639                            virt_to_phys(drv_info->rx_buffer),
1640                            RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
1641         if (ret) {
1642                 pr_err("failed to register FFA RxTx buffers\n");
1643                 goto free_pages;
1644         }
1645
1646         mutex_init(&drv_info->rx_lock);
1647         mutex_init(&drv_info->tx_lock);
1648
1649         ffa_set_up_mem_ops_native_flag();
1650
1651         ffa_notifications_setup();
1652
1653         ret = ffa_setup_partitions();
1654         if (ret) {
1655                 pr_err("failed to setup partitions\n");
1656                 goto cleanup_notifs;
1657         }
1658
1659         ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
1660                                        drv_info, true);
1661         if (ret)
1662                 pr_info("Failed to register driver sched callback %d\n", ret);
1663
1664         return 0;
1665
1666 cleanup_notifs:
1667         ffa_notifications_cleanup();
1668 free_pages:
1669         if (drv_info->tx_buffer)
1670                 free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
1671         free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
1672 free_drv_info:
1673         kfree(drv_info);
1674         return ret;
1675 }
1676 module_init(ffa_init);
1677
1678 static void __exit ffa_exit(void)
1679 {
1680         ffa_notifications_cleanup();
1681         ffa_partitions_cleanup();
1682         ffa_rxtx_unmap(drv_info->vm_id);
1683         free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
1684         free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
1685         kfree(drv_info);
1686 }
1687 module_exit(ffa_exit);
1688
1689 MODULE_ALIAS("arm-ffa");
1690 MODULE_AUTHOR("Sudeep Holla <[email protected]>");
1691 MODULE_DESCRIPTION("Arm FF-A interface driver");
1692 MODULE_LICENSE("GPL v2");