// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/objtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cc_platform.h>

#include <asm/hypervisor.h>
#include <drm/drm_ioctl.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_msg_x86.h"
#include "vmwgfx_msg_arm64.h"
#include "vmwgfx_mksstat.h"

#define MESSAGE_STATUS_SUCCESS  0x0001
#define MESSAGE_STATUS_DORECV   0x0002
#define MESSAGE_STATUS_CPT      0x0010
#define MESSAGE_STATUS_HB       0x0080

#define RPCI_PROTOCOL_NUM       0x49435052
#define GUESTMSG_FLAG_COOKIE    0x80000000

#define RETRIES                 3

#define VMW_PORT_CMD_MSG        30
#define VMW_PORT_CMD_HB_MSG     0
#define VMW_PORT_CMD_OPEN_CHANNEL  (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_SENDSIZE   (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSIZE   (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)

#define VMW_PORT_CMD_MKS_GUEST_STATS   85
#define VMW_PORT_CMD_MKSGS_RESET       (0 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_ADD_PPN     (1 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)
#define VMW_PORT_CMD_MKSGS_REMOVE_PPN  (2 << 16 | VMW_PORT_CMD_MKS_GUEST_STATS)

#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
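
/*
 * Illustrative note: a backdoor command word packs the message type into
 * its high 16 bits and the base command (VMW_PORT_CMD_MSG == 30) into its
 * low 16 bits, so e.g. VMW_PORT_CMD_SENDSIZE == (MSG_TYPE_SENDSIZE << 16) | 30.
 * Replies mirror that split: the host's status bits come back in the high
 * word of %ecx, which is why the code below tests HIGH_WORD(ecx) against
 * the MESSAGE_STATUS_* flags.
 */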

#define MAX_USER_MSG_LENGTH	PAGE_SIZE

static u32 vmw_msg_enabled = 1;

enum rpc_msg_type {
	MSG_TYPE_OPEN,
	MSG_TYPE_SENDSIZE,
	MSG_TYPE_SENDPAYLOAD,
	MSG_TYPE_RECVSIZE,
	MSG_TYPE_RECVPAYLOAD,
	MSG_TYPE_RECVSTATUS,
	MSG_TYPE_CLOSE,
};

struct rpc_channel {
	u16 channel_id;
	u32 cookie_high;
	u32 cookie_low;
};

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Kernel mksGuestStats counter names and descriptions; same order as enum mksstat_kern_stats_t */
static const char* const mksstat_kern_name_desc[MKSSTAT_KERN_COUNT][2] =
{
	{ "vmw_execbuf_ioctl", "vmw_execbuf_ioctl" },
	{ "vmw_cotable_resize", "vmw_cotable_resize" },
};
#endif

/**
 * vmw_open_channel
 *
 * @channel: RPC channel
 * @protocol: Protocol to open the channel with (e.g. RPCI_PROTOCOL_NUM)
 *
 * Returns: 0 on success
 */
static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
{
	u32 ecx, edx, esi, edi;

	vmware_hypercall6(VMW_PORT_CMD_OPEN_CHANNEL,
			  (protocol | GUESTMSG_FLAG_COOKIE), 0,
			  &ecx, &edx, &esi, &edi);

	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	channel->channel_id  = HIGH_WORD(edx);
	channel->cookie_high = esi;
	channel->cookie_low  = edi;

	return 0;
}

/**
 * vmw_close_channel
 *
 * @channel: RPC channel
 *
 * Returns: 0 on success
 */
static int vmw_close_channel(struct rpc_channel *channel)
{
	u32 ecx;

	vmware_hypercall5(VMW_PORT_CMD_CLOSE_CHANNEL,
			  0, channel->channel_id << 16,
			  channel->cookie_high,
			  channel->cookie_low,
			  &ecx);

	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
		return -EINVAL;

	return 0;
}
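
/*
 * Example (illustrative sketch only): the channel helpers above are used
 * as a bracketed open/use/close sequence, exactly as the callers further
 * down in this file do:
 *
 *	struct rpc_channel channel;
 *
 *	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
 *		return -EINVAL;
 *	// ... vmw_send_msg()/vmw_recv_msg() on the channel ...
 *	vmw_close_channel(&channel);
 */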

/**
 * vmw_port_hb_out - Send the message payload either through the
 * high-bandwidth port if available, or through the backdoor otherwise.
 * @channel: The rpc channel.
 * @msg: NULL-terminated message.
 * @hb: Whether the high-bandwidth port is available.
 *
 * Return: The port status.
 */
static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
				     const char *msg, bool hb)
{
	u32 ebx, ecx;
	unsigned long msg_len = strlen(msg);

	/* HB port can't access encrypted memory. */
	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		vmware_hypercall_hb_out(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			msg_len,
			channel->channel_id << 16,
			(uintptr_t) msg, channel->cookie_low,
			channel->cookie_high,
			&ebx);

		return ebx;
	}

	/* HB port not available. Send the message 4 bytes at a time. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (msg_len && (HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS)) {
		unsigned int bytes = min_t(size_t, msg_len, 4);
		unsigned long word = 0;

		memcpy(&word, msg, bytes);
		msg_len -= bytes;
		msg += bytes;

		vmware_hypercall5(VMW_PORT_CMD_MSG |
				  (MSG_TYPE_SENDPAYLOAD << 16),
				  word, channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ecx);
	}

	return ecx;
}
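
/*
 * Illustrative note: in the non-HB path each SENDPAYLOAD hypercall carries
 * at most four message bytes in a register. Sending "log hi", for example,
 * takes two calls: word = 'l' | 'o' << 8 | 'g' << 16 | ' ' << 24, then
 * word = 'h' | 'i' << 8 (the backdoor assumes little-endian byte order).
 */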

/**
 * vmw_port_hb_in - Receive the message payload either through the
 * high-bandwidth port if available, or through the backdoor otherwise.
 * @channel: The rpc channel.
 * @reply: Pointer to buffer holding reply.
 * @reply_len: Length of the reply.
 * @hb: Whether the high-bandwidth port is available.
 *
 * Return: The port status.
 */
static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
				    unsigned long reply_len, bool hb)
{
	u32 ebx, ecx, edx;

	/* HB port can't access encrypted memory */
	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
		vmware_hypercall_hb_in(
			(MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
			reply_len,
			channel->channel_id << 16,
			channel->cookie_high,
			(uintptr_t) reply, channel->cookie_low,
			&ebx);

		return ebx;
	}

	/* HB port not available. Retrieve the message 4 bytes at a time. */
	ecx = MESSAGE_STATUS_SUCCESS << 16;
	while (reply_len) {
		unsigned int bytes = min_t(unsigned long, reply_len, 4);

		vmware_hypercall7(VMW_PORT_CMD_MSG |
				  (MSG_TYPE_RECVPAYLOAD << 16),
				  MESSAGE_STATUS_SUCCESS,
				  channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ebx, &ecx, &edx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
			break;

		memcpy(reply, &ebx, bytes);
		reply_len -= bytes;
		reply += bytes;
	}

	return ecx;
}
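
/*
 * Illustrative note: the receive loop mirrors the send loop above. Each
 * RECVPAYLOAD hypercall returns up to four payload bytes in %ebx, with the
 * final, possibly short, word trimmed by 'bytes'; a 6-byte reply is thus
 * assembled from one 4-byte and one 2-byte chunk.
 */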

/**
 * vmw_send_msg: Sends a message to the host
 *
 * @channel: RPC channel
 * @msg: NULL terminated string
 *
 * Returns: 0 on success
 */
static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
{
	u32 ebx, ecx;
	size_t msg_len = strlen(msg);
	int retries = 0;

	while (retries < RETRIES) {
		retries++;

		vmware_hypercall5(VMW_PORT_CMD_SENDSIZE,
				  msg_len, channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ecx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			/* Expected success. Give up. */
			return -EINVAL;
		}

		/* Send msg */
		ebx = vmw_port_hb_out(channel, msg,
				      !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));

		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
			return 0;
		} else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
			/* A checkpoint occurred. Retry. */
			continue;
		} else {
			break;
		}
	}

	return -EINVAL;
}
STACK_FRAME_NON_STANDARD(vmw_send_msg);
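
/*
 * Example (illustrative sketch only): at the port level a complete
 * guest->host send as driven by vmw_send_msg() is a two-step handshake,
 *
 *	SENDSIZE (msg_len)		// host acks and advertises HB
 *	SENDPAYLOAD (word by word)	// or one HB burst via vmw_port_hb_out()
 *
 * retried up to RETRIES times if the VM was checkpointed mid-transfer
 * (MESSAGE_STATUS_CPT).
 */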

/**
 * vmw_recv_msg: Receives a message from the host
 *
 * Note: It is the caller's responsibility to call kfree() on msg.
 *
 * @channel: channel opened by vmw_open_channel
 * @msg: [OUT] message received from the host
 * @msg_len: message length
 */
static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
			size_t *msg_len)
{
	u32 ebx, ecx, edx;
	char *reply = NULL;
	size_t reply_len = 0;
	int retries = 0;

	*msg_len = 0;
	*msg = NULL;

	while (retries < RETRIES) {
		retries++;

		vmware_hypercall7(VMW_PORT_CMD_RECVSIZE,
				  0, channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ebx, &ecx, &edx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			DRM_ERROR("Failed to get reply size for host message.\n");
			return -EINVAL;
		}

		/* No reply available. This is okay. */
		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
			return 0;

		reply_len = ebx;
		reply     = kzalloc(reply_len + 1, GFP_KERNEL);
		if (!reply) {
			DRM_ERROR("Cannot allocate memory for host message reply.\n");
			return -ENOMEM;
		}

		/* Receive buffer */
		ebx = vmw_port_hb_in(channel, reply, reply_len,
				     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;

			if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred. Retry. */
				continue;
			}

			return -EINVAL;
		}

		reply[reply_len] = '\0';

		/* Ack buffer */
		vmware_hypercall5(VMW_PORT_CMD_RECVSTATUS,
				  MESSAGE_STATUS_SUCCESS,
				  channel->channel_id << 16,
				  channel->cookie_high,
				  channel->cookie_low,
				  &ecx);

		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
			kfree(reply);
			reply = NULL;

			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
				/* A checkpoint occurred. Retry. */
				continue;
			}

			return -EINVAL;
		}

		break;
	}

	if (!reply)
		return -EINVAL;

	*msg_len = reply_len;
	*msg = reply;

	return 0;
}
STACK_FRAME_NON_STANDARD(vmw_recv_msg);
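
/*
 * Example (illustrative sketch only): vmw_recv_msg() allocates the reply
 * buffer, so callers own and must free it:
 *
 *	void *reply = NULL;
 *	size_t reply_len = 0;
 *
 *	if (!vmw_recv_msg(&channel, &reply, &reply_len) && reply) {
 *		// ... consume reply_len bytes ...
 *		kfree(reply);
 *	}
 */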

/**
 * vmw_host_get_guestinfo: Gets a GuestInfo parameter
 *
 * Gets the value of a GuestInfo.* parameter. The value returned will be in
 * a string, and it is up to the caller to post-process.
 *
 * @guest_info_param: Parameter to get, e.g. GuestInfo.svga.gl3
 * @buffer: if NULL, *reply_len will contain reply size.
 * @length: size of the reply_buf. Set to size of reply upon return
 *
 * Returns: 0 on success
 */
int vmw_host_get_guestinfo(const char *guest_info_param,
			   char *buffer, size_t *length)
{
	struct rpc_channel channel;
	char *msg, *reply = NULL;
	size_t reply_len = 0;

	if (!vmw_msg_enabled)
		return -ENODEV;

	if (!guest_info_param || !length)
		return -EINVAL;

	msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
			  guest_info_param);
		return -ENOMEM;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
		goto out_open;

	if (vmw_send_msg(&channel, msg) ||
	    vmw_recv_msg(&channel, (void *) &reply, &reply_len))
		goto out_msg;

	vmw_close_channel(&channel);
	if (buffer && reply && reply_len > 0) {
		/* Remove reply code, which are the first 2 characters of
		 * the reply
		 */
		reply_len = max(reply_len - 2, (size_t) 0);
		reply_len = min(reply_len, *length);

		if (reply_len > 0)
			memcpy(buffer, reply + 2, reply_len);
	}

	*length = reply_len;

	kfree(reply);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
	kfree(reply);
out_open:
	*length = 0;
	kfree(msg);
	DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);

	return -EINVAL;
}
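
/*
 * Example (illustrative sketch only): a caller can probe the reply size
 * first by passing a NULL buffer, then fetch the value:
 *
 *	size_t len = 0;
 *	char *buf;
 *
 *	if (!vmw_host_get_guestinfo("GuestInfo.svga.gl3", NULL, &len) && len) {
 *		buf = kzalloc(len + 1, GFP_KERNEL);
 *		if (buf)
 *			vmw_host_get_guestinfo("GuestInfo.svga.gl3", buf, &len);
 *	}
 */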

/**
 * vmw_host_printf: Sends a log message to the host
 *
 * @fmt: Regular printf format string and arguments
 *
 * Returns: 0 on success
 */
__printf(1, 2)
int vmw_host_printf(const char *fmt, ...)
{
	va_list ap;
	struct rpc_channel channel;
	char *msg;
	char *log;
	int ret = 0;

	if (!vmw_msg_enabled)
		return -ENODEV;

	if (!fmt)
		return ret;

	va_start(ap, fmt);
	log = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!log) {
		DRM_ERROR("Cannot allocate memory for the log message.\n");
		return -ENOMEM;
	}

	msg = kasprintf(GFP_KERNEL, "log %s", log);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for host log message.\n");
		kfree(log);
		return -ENOMEM;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
		goto out_open;

	if (vmw_send_msg(&channel, msg))
		goto out_msg;

	vmw_close_channel(&channel);
	kfree(log);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(log);
	kfree(msg);
	DRM_ERROR("Failed to send host log message.\n");

	return -EINVAL;
}
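
/*
 * Example (illustrative sketch only): messages sent through the "log" RPC
 * end up in the host-side VM log, e.g.:
 *
 *	vmw_host_printf("vmwgfx: %s failed with %d", __func__, ret);
 */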

/**
 * vmw_msg_ioctl: Sends and receives a message to/from the host
 *
 * Sends a message from user-space to the host.
 * Can also receive a result from the host and return that to user-space.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 */
int vmw_msg_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_vmw_msg_arg *arg =
		(struct drm_vmw_msg_arg *)data;
	struct rpc_channel channel;
	char *msg;
	int length;

	msg = kmalloc(MAX_USER_MSG_LENGTH, GFP_KERNEL);
	if (!msg) {
		DRM_ERROR("Cannot allocate memory for log message.\n");
		return -ENOMEM;
	}

	length = strncpy_from_user(msg, (void __user *)((unsigned long)arg->send),
				   MAX_USER_MSG_LENGTH);
	if (length < 0 || length >= MAX_USER_MSG_LENGTH) {
		DRM_ERROR("Userspace message access failure.\n");
		kfree(msg);
		return -EINVAL;
	}

	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) {
		DRM_ERROR("Failed to open channel.\n");
		goto out_open;
	}

	if (vmw_send_msg(&channel, msg)) {
		DRM_ERROR("Failed to send message to host.\n");
		goto out_msg;
	}

	if (!arg->send_only) {
		char *reply = NULL;
		size_t reply_len = 0;

		if (vmw_recv_msg(&channel, (void *) &reply, &reply_len)) {
			DRM_ERROR("Failed to receive message from host.\n");
			goto out_msg;
		}
		if (reply && reply_len > 0) {
			if (copy_to_user((void __user *)((unsigned long)arg->receive),
					 reply, reply_len)) {
				DRM_ERROR("Failed to copy message to userspace.\n");
				kfree(reply);
				goto out_msg;
			}
			arg->receive_len = (__u32)reply_len;
		}
		kfree(reply);
	}

	vmw_close_channel(&channel);
	kfree(msg);

	return 0;

out_msg:
	vmw_close_channel(&channel);
out_open:
	kfree(msg);

	return -EINVAL;
}
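
/*
 * Example (illustrative sketch only, userspace side): the ioctl is reachable
 * through libdrm's command interface, assuming a vmwgfx device fd:
 *
 *	struct drm_vmw_msg_arg arg = {
 *		.send = (uintptr_t)"log hello from userspace",
 *		.send_only = 1,
 *	};
 *	drmCommandWriteRead(fd, DRM_VMW_MSG, &arg, sizeof(arg));
 */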

/**
 * reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content
 *
 * @arr: Array to reset.
 * @size: Array length.
 */
static inline void reset_ppn_array(PPN64 *arr, size_t size)
{
	size_t i;

	BUG_ON(!arr || size == 0);

	for (i = 0; i < size; ++i)
		arr[i] = INVALID_PPN64;
}

/**
 * hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 */
static inline void hypervisor_ppn_reset_all(void)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_RESET, 0);
}

/**
 * hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
 * hypervisor. Any related userspace pages should be pinned in advance.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_add(PPN64 pfn)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_ADD_PPN, (unsigned long)pfn);
}

/**
 * hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_REMOVE_PPN, (unsigned long)pfn);
}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)

/* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */
#define MKSSTAT_KERNEL_PAGES_ORDER 2
/* Header to the text description of mksGuestStat instance descriptor */
#define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"

/**
 * mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record
 * for the respective mksGuestStat index.
 *
 * @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record.
 * @pstat: Pointer to array of MKSGuestStatCounterTime.
 * @pinfo: Pointer to array of MKSGuestStatInfoEntry.
 * @pstrs: Pointer to current end of the name/description sequence.
 * Return: Pointer to the new end of the names/description sequence.
 */
static inline char *mksstat_init_record_time(mksstat_kern_stats_t stat_idx,
	MKSGuestStatCounterTime *pstat, MKSGuestStatInfoEntry *pinfo, char *pstrs)
{
	char *const pstrd = pstrs + strlen(mksstat_kern_name_desc[stat_idx][0]) + 1;

	strcpy(pstrs, mksstat_kern_name_desc[stat_idx][0]);
	strcpy(pstrd, mksstat_kern_name_desc[stat_idx][1]);

	pinfo[stat_idx].name.s = pstrs;
	pinfo[stat_idx].description.s = pstrd;
	pinfo[stat_idx].flags = MKS_GUEST_STAT_FLAG_TIME;
	pinfo[stat_idx].stat.counterTime = &pstat[stat_idx];

	return pstrd + strlen(mksstat_kern_name_desc[stat_idx][1]) + 1;
}
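
/*
 * Illustrative note: after two calls the strings area holds a packed
 * sequence of NUL-terminated name/description pairs, e.g.
 *
 *	"vmw_execbuf_ioctl\0vmw_execbuf_ioctl\0"
 *	"vmw_cotable_resize\0vmw_cotable_resize\0"
 *
 * and the returned pointer always marks the next free byte.
 */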

/**
 * mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and
 * kernel-internal counters. Adds PFN mapping to the hypervisor.
 *
 * Create a single mksGuestStat instance descriptor and corresponding structures
 * for all kernel-internal counters. The corresponding PFNs are mapped with the
 * hypervisor.
 *
 * @ppage: Output pointer to page containing the instance descriptor.
 * Return: Zero on success, negative error code on error.
 */
static int mksstat_init_kern_id(struct page **ppage)
{
	MKSGuestStatInstanceDescriptor *pdesc;
	MKSGuestStatCounterTime *pstat;
	MKSGuestStatInfoEntry *pinfo;
	char *pstrs, *pstrs_acc;

	/* Allocate pages for the kernel-internal instance descriptor */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, MKSSTAT_KERNEL_PAGES_ORDER);

	if (!page)
		return -ENOMEM;

	pdesc = page_address(page);
	pstat = vmw_mksstat_get_kern_pstat(pdesc);
	pinfo = vmw_mksstat_get_kern_pinfo(pdesc);
	pstrs = vmw_mksstat_get_kern_pstrs(pdesc);

	/* Set up all kernel-internal counters and corresponding structures */
	pstrs_acc = pstrs;
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_EXECBUF, pstat, pinfo, pstrs_acc);
	pstrs_acc = mksstat_init_record_time(MKSSTAT_KERN_COTABLE_RESIZE, pstat, pinfo, pstrs_acc);

	/* Add new counters above, in their order of appearance in mksstat_kern_stats_t */

	BUG_ON(pstrs_acc - pstrs > PAGE_SIZE);

	/* Set up the kernel-internal instance descriptor */
	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = (uintptr_t)pstat;
	pdesc->strsStartVA = (uintptr_t)pstrs;
	pdesc->statLength = sizeof(*pstat) * MKSSTAT_KERN_COUNT;
	pdesc->infoLength = sizeof(*pinfo) * MKSSTAT_KERN_COUNT;
	pdesc->strsLength = pstrs_acc - pstrs;
	snprintf(pdesc->description, ARRAY_SIZE(pdesc->description) - 1, "%s pid=%d",
		MKSSTAT_KERNEL_DESCRIPTION, current->pid);

	pdesc->statPPNs[0] = page_to_pfn(virt_to_page(pstat));
	reset_ppn_array(pdesc->statPPNs + 1, ARRAY_SIZE(pdesc->statPPNs) - 1);

	pdesc->infoPPNs[0] = page_to_pfn(virt_to_page(pinfo));
	reset_ppn_array(pdesc->infoPPNs + 1, ARRAY_SIZE(pdesc->infoPPNs) - 1);

	pdesc->strsPPNs[0] = page_to_pfn(virt_to_page(pstrs));
	reset_ppn_array(pdesc->strsPPNs + 1, ARRAY_SIZE(pdesc->strsPPNs) - 1);

	*ppage = page;

	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	return 0;
}
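
/*
 * Illustrative note: with MKSSTAT_KERNEL_PAGES_ORDER == 2 the allocation is
 * four contiguous pages, carved up by the vmw_mksstat_get_kern_*() helpers
 * roughly as: the first page holds the instance descriptor, and the
 * following pages hold the counter, info and string areas whose first PFNs
 * are recorded above.
 */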

/**
 * vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal
 * mksGuestStat instance descriptor.
 *
 * Find a slot for a single kernel-internal mksGuestStat instance descriptor.
 * In case no such was already present, allocate a new one and set up a kernel-
 * internal mksGuestStat instance descriptor for the former.
 *
 * @pid: Process for which a slot is sought.
 * @dev_priv: Identifies the drm private device.
 * Return: Non-negative slot on success, negative error code on error.
 */
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{
	const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);

		/* Check if an instance descriptor for this pid is already present */
		if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot]))
			return (int)slot;

		/* Set up a new instance descriptor for this pid */
		if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) {
			const int ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);

			if (!ret) {
				/* Reset top-timer tracking for this slot */
				dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;

				atomic_set(&dev_priv->mksstat_kern_pids[slot], pid);
				return (int)slot;
			}

			atomic_set(&dev_priv->mksstat_kern_pids[slot], 0);
			return ret;
		}
	}

	return -ENOSPC;
}
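
/*
 * Example (illustrative sketch only): kernel-internal counters are driven
 * through the MKS_STAT_TIME_* macros from vmwgfx_mksstat.h, e.g. around the
 * execbuf path:
 *
 *	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
 *	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
 *	// ... timed work ...
 *	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
 */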

/**
 * vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating
 * mksGuestStat instance-descriptor page and unpins all related user pages.
 *
 * Unpin all user pages related to this instance descriptor and free
 * the instance-descriptor page itself.
 *
 * @page: Page of the instance descriptor.
 */
static void vmw_mksstat_cleanup_descriptor(struct page *page)
{
	MKSGuestStatInstanceDescriptor *pdesc = page_address(page);
	size_t i;

	for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));

	for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
		unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));

	__free_page(page);
}

/**
 * vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings, containing active mksGuestStat instance
 * descriptors, unpin the related userspace pages and free the related kernel pages.
 *
 * @dev_priv: Identifies the drm private device.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
{
	int ret = 0;
	size_t i;

	/* Discard all PFN mappings with the hypervisor */
	hypervisor_ppn_reset_all();

	/* Discard all userspace-originating instance descriptors and unpin all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_user_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_user_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_user_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_user_pids[i], 0);

				vmw_mksstat_cleanup_descriptor(page);
				continue;
			}
		}

		ret = -EAGAIN;
	}

#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
	/* Discard all kernel-internal instance descriptors and free all related pages */
	for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) {
		const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);

		if (!pid0)
			continue;

		if (pid0 != MKSSTAT_PID_RESERVED) {
			const pid_t pid1 = atomic_cmpxchg(&dev_priv->mksstat_kern_pids[i], pid0, MKSSTAT_PID_RESERVED);

			if (!pid1)
				continue;

			if (pid1 == pid0) {
				struct page *const page = dev_priv->mksstat_kern_pages[i];

				BUG_ON(!page);

				dev_priv->mksstat_kern_pages[i] = NULL;
				atomic_set(&dev_priv->mksstat_kern_pids[i], 0);

				__free_pages(page, MKSSTAT_KERNEL_PAGES_ORDER);
				continue;
			}
		}

		ret = -EAGAIN;
	}
#endif

	return ret;
}

/**
 * vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors
 * from the hypervisor.
 *
 * Discard all hypervisor PFN mappings, containing active mksGuestStat instance
 * descriptors, unpin the related userspace pages and free the related kernel pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *const dev_priv = vmw_priv(dev);

	return vmw_mksstat_remove_all(dev_priv);
}

/**
 * vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat
 * instance descriptor and registers that with the hypervisor.
 *
 * Create a hypervisor PFN mapping, containing a single mksGuestStat instance
 * descriptor and pin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_add_arg *arg =
		(struct drm_vmw_mksstat_add_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t num_pages_stat = PFN_UP(arg->stat_len);
	const size_t num_pages_info = PFN_UP(arg->info_len);
	const size_t num_pages_strs = PFN_UP(arg->strs_len);

	size_t i, slot;
	long desc_len, nr_pinned_stat, nr_pinned_info, nr_pinned_strs;

	MKSGuestStatInstanceDescriptor *pdesc;
	struct page *page = NULL;
	struct page **pages_stat = NULL;
	struct page **pages_info = NULL;
	struct page **pages_strs = NULL;

	int ret_err = -ENOMEM;

	if (!arg->stat || !arg->info || !arg->strs)
		return -EINVAL;

	if (!arg->stat_len || !arg->info_len || !arg->strs_len)
		return -EINVAL;

	if (!arg->description)
		return -EINVAL;

	if (num_pages_stat > ARRAY_SIZE(pdesc->statPPNs) ||
	    num_pages_info > ARRAY_SIZE(pdesc->infoPPNs) ||
	    num_pages_strs > ARRAY_SIZE(pdesc->strsPPNs))
		return -EINVAL;

	/* Find an available slot in the mksGuestStats user array and reserve it */
	for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot)
		if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED))
			break;

	if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -ENOSPC;

	BUG_ON(dev_priv->mksstat_user_pages[slot]);

	/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
	pages_stat = (struct page **)kmalloc_array(
		ARRAY_SIZE(pdesc->statPPNs) +
		ARRAY_SIZE(pdesc->infoPPNs) +
		ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);

	if (!pages_stat)
		goto err_nomem;

	pages_info = pages_stat + ARRAY_SIZE(pdesc->statPPNs);
	pages_strs = pages_info + ARRAY_SIZE(pdesc->infoPPNs);

	/* Allocate a page for the instance descriptor */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		goto err_nomem;

	/* Set up the instance descriptor */
	pdesc = page_address(page);

	pdesc->reservedMBZ = 0;
	pdesc->statStartVA = arg->stat;
	pdesc->strsStartVA = arg->strs;
	pdesc->statLength = arg->stat_len;
	pdesc->infoLength = arg->info_len;
	pdesc->strsLength = arg->strs_len;
	desc_len = strncpy_from_user(pdesc->description, u64_to_user_ptr(arg->description),
		ARRAY_SIZE(pdesc->description) - 1);

	if (desc_len < 0) {
		ret_err = -EFAULT;
		goto err_nomem;
	}

	reset_ppn_array(pdesc->statPPNs, ARRAY_SIZE(pdesc->statPPNs));
	reset_ppn_array(pdesc->infoPPNs, ARRAY_SIZE(pdesc->infoPPNs));
	reset_ppn_array(pdesc->strsPPNs, ARRAY_SIZE(pdesc->strsPPNs));

	/* Pin mksGuestStat user pages and store those in the instance descriptor */
	nr_pinned_stat = pin_user_pages_fast(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat);
	if (num_pages_stat != nr_pinned_stat)
		goto err_pin_stat;

	for (i = 0; i < num_pages_stat; ++i)
		pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);

	nr_pinned_info = pin_user_pages_fast(arg->info, num_pages_info, FOLL_LONGTERM, pages_info);
	if (num_pages_info != nr_pinned_info)
		goto err_pin_info;

	for (i = 0; i < num_pages_info; ++i)
		pdesc->infoPPNs[i] = page_to_pfn(pages_info[i]);

	nr_pinned_strs = pin_user_pages_fast(arg->strs, num_pages_strs, FOLL_LONGTERM, pages_strs);
	if (num_pages_strs != nr_pinned_strs)
		goto err_pin_strs;

	for (i = 0; i < num_pages_strs; ++i)
		pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);

	/* Send the descriptor to the host via a hypervisor call. The mksGuestStat
	   pages will remain in use until the user requests a matching remove stats
	   or a stats reset occurs. */
	hypervisor_ppn_add((PPN64)page_to_pfn(page));

	dev_priv->mksstat_user_pages[slot] = page;
	atomic_set(&dev_priv->mksstat_user_pids[slot], task_pgrp_vnr(current));

	arg->id = slot;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.description='%.*s' id=%zu\n", current->pid, (int)desc_len, pdesc->description, slot);

	kfree(pages_stat);
	return 0;

err_pin_strs:
	if (nr_pinned_strs > 0)
		unpin_user_pages(pages_strs, nr_pinned_strs);

err_pin_info:
	if (nr_pinned_info > 0)
		unpin_user_pages(pages_info, nr_pinned_info);

err_pin_stat:
	if (nr_pinned_stat > 0)
		unpin_user_pages(pages_stat, nr_pinned_stat);

err_nomem:
	atomic_set(&dev_priv->mksstat_user_pids[slot], 0);

	if (page)
		__free_page(page);
	kfree(pages_stat);

	return ret_err;
}
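
/*
 * Example (illustrative sketch only, userspace side): pairing this ioctl
 * with the remove ioctl below, assuming page-aligned buffers laid out per
 * struct drm_vmw_mksstat_add_arg:
 *
 *	struct drm_vmw_mksstat_add_arg add = {
 *		.stat = (uintptr_t)stat_buf, .stat_len = stat_sz,
 *		.info = (uintptr_t)info_buf, .info_len = info_sz,
 *		.strs = (uintptr_t)strs_buf, .strs_len = strs_sz,
 *		.description = (uintptr_t)"my-app",
 *	};
 *	drmCommandWriteRead(fd, DRM_VMW_MKSSTAT_ADD, &add, sizeof(add));
 *	// ...
 *	struct drm_vmw_mksstat_remove_arg rm = { .id = add.id };
 *	drmCommandWrite(fd, DRM_VMW_MKSSTAT_REMOVE, &rm, sizeof(rm));
 */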

/**
 * vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat
 * instance descriptor from the hypervisor.
 *
 * Discard a hypervisor PFN mapping, containing a single mksGuestStat instance
 * descriptor and unpin the corresponding userspace pages.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller; unused.
 * Return: Zero on success, negative error code on error.
 */
int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_mksstat_remove_arg *arg =
		(struct drm_vmw_mksstat_remove_arg *) data;

	struct vmw_private *const dev_priv = vmw_priv(dev);

	const size_t slot = arg->id;
	pid_t pgid, pid;

	if (slot >= ARRAY_SIZE(dev_priv->mksstat_user_pids))
		return -EINVAL;

	DRM_DEV_INFO(dev->dev, "pid=%d arg.id=%zu\n", current->pid, slot);

	pgid = task_pgrp_vnr(current);
	pid = atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], pgid, MKSSTAT_PID_RESERVED);

	if (!pid)
		return 0;

	if (pid == pgid) {
		struct page *const page = dev_priv->mksstat_user_pages[slot];

		BUG_ON(!page);

		dev_priv->mksstat_user_pages[slot] = NULL;
		atomic_set(&dev_priv->mksstat_user_pids[slot], 0);

		hypervisor_ppn_remove((PPN64)page_to_pfn(page));

		vmw_mksstat_cleanup_descriptor(page);
		return 0;
	}

	return -EAGAIN;
}

/**
 * vmw_disable_backdoor: Disables all backdoor communication
 * with the hypervisor.
 */
void vmw_disable_backdoor(void)
{
	vmw_msg_enabled = 0;
}