1 /*
2  * Management Module Support for MPT (Message Passing Technology) based
3  * controllers
4  *
5  * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
6  * Copyright (C) 2012-2014  LSI Corporation
7  * Copyright (C) 2013-2014 Avago Technologies
8  *  (mailto: [email protected])
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version 2
13  * of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * NO WARRANTY
21  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25  * solely responsible for determining the appropriateness of using and
26  * distributing the Program and assumes all risks associated with its
27  * exercise of rights under this Agreement, including but not limited to
28  * the risks and costs of program errors, damage to or loss of data,
29  * programs or equipment, and unavailability or interruption of operations.
30
31  * DISCLAIMER OF LIABILITY
32  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40  * You should have received a copy of the GNU General Public License
41  * along with this program; if not, write to the Free Software
42  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
43  * USA.
44  */
45
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/compat.h>
55 #include <linux/poll.h>
56
57 #include <linux/io.h>
58 #include <linux/uaccess.h>
59
60 #include "mpt3sas_base.h"
61 #include "mpt3sas_ctl.h"
62
63
64 static struct fasync_struct *async_queue;
65 static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
66
67
68 /**
69  * enum block_state - blocking state
70  * @NON_BLOCKING: non blocking
71  * @BLOCKING: blocking
72  *
73  * These states are for ioctls that need to wait for a response
74  * from firmware, so they probably require sleep.
75  */
76 enum block_state {
77         NON_BLOCKING,
78         BLOCKING,
79 };
80
81 /**
82  * _ctl_display_some_debug - debug routine
83  * @ioc: per adapter object
84  * @smid: system request message index
85  * @calling_function_name: string passed from the calling function
86  * @mpi_reply: reply message frame
87  * Context: none.
88  *
89  * Function for displaying debug info helpful when debugging issues
90  * in this module.
91  */
92 static void
93 _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
94         char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
95 {
96         Mpi2ConfigRequest_t *mpi_request;
97         char *desc = NULL;
98
99         if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
100                 return;
101
102         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
103         switch (mpi_request->Function) {
104         case MPI2_FUNCTION_SCSI_IO_REQUEST:
105         {
106                 Mpi2SCSIIORequest_t *scsi_request =
107                     (Mpi2SCSIIORequest_t *)mpi_request;
108
109                 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
110                     "scsi_io, cmd(0x%02x), cdb_len(%d)",
111                     scsi_request->CDB.CDB32[0],
112                     le16_to_cpu(scsi_request->IoFlags) & 0xF);
113                 desc = ioc->tmp_string;
114                 break;
115         }
116         case MPI2_FUNCTION_SCSI_TASK_MGMT:
117                 desc = "task_mgmt";
118                 break;
119         case MPI2_FUNCTION_IOC_INIT:
120                 desc = "ioc_init";
121                 break;
122         case MPI2_FUNCTION_IOC_FACTS:
123                 desc = "ioc_facts";
124                 break;
125         case MPI2_FUNCTION_CONFIG:
126         {
127                 Mpi2ConfigRequest_t *config_request =
128                     (Mpi2ConfigRequest_t *)mpi_request;
129
130                 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
131                     "config, type(0x%02x), ext_type(0x%02x), number(%d)",
132                     (config_request->Header.PageType &
133                      MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
134                     config_request->Header.PageNumber);
135                 desc = ioc->tmp_string;
136                 break;
137         }
138         case MPI2_FUNCTION_PORT_FACTS:
139                 desc = "port_facts";
140                 break;
141         case MPI2_FUNCTION_PORT_ENABLE:
142                 desc = "port_enable";
143                 break;
144         case MPI2_FUNCTION_EVENT_NOTIFICATION:
145                 desc = "event_notification";
146                 break;
147         case MPI2_FUNCTION_FW_DOWNLOAD:
148                 desc = "fw_download";
149                 break;
150         case MPI2_FUNCTION_FW_UPLOAD:
151                 desc = "fw_upload";
152                 break;
153         case MPI2_FUNCTION_RAID_ACTION:
154                 desc = "raid_action";
155                 break;
156         case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
157         {
158                 Mpi2SCSIIORequest_t *scsi_request =
159                     (Mpi2SCSIIORequest_t *)mpi_request;
160
161                 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
162                     "raid_pass, cmd(0x%02x), cdb_len(%d)",
163                     scsi_request->CDB.CDB32[0],
164                     le16_to_cpu(scsi_request->IoFlags) & 0xF);
165                 desc = ioc->tmp_string;
166                 break;
167         }
168         case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
169                 desc = "sas_iounit_cntl";
170                 break;
171         case MPI2_FUNCTION_SATA_PASSTHROUGH:
172                 desc = "sata_pass";
173                 break;
174         case MPI2_FUNCTION_DIAG_BUFFER_POST:
175                 desc = "diag_buffer_post";
176                 break;
177         case MPI2_FUNCTION_DIAG_RELEASE:
178                 desc = "diag_release";
179                 break;
180         case MPI2_FUNCTION_SMP_PASSTHROUGH:
181                 desc = "smp_passthrough";
182                 break;
183         }
184
185         if (!desc)
186                 return;
187
188         ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);
189
190         if (!mpi_reply)
191                 return;
192
193         if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
194                 ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
195                          le16_to_cpu(mpi_reply->IOCStatus),
196                          le32_to_cpu(mpi_reply->IOCLogInfo));
197
198         if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
199             mpi_request->Function ==
200             MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
201                 Mpi2SCSIIOReply_t *scsi_reply =
202                     (Mpi2SCSIIOReply_t *)mpi_reply;
203                 struct _sas_device *sas_device = NULL;
204                 struct _pcie_device *pcie_device = NULL;
205
206                 sas_device = mpt3sas_get_sdev_by_handle(ioc,
207                     le16_to_cpu(scsi_reply->DevHandle));
208                 if (sas_device) {
209                         ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
210                                  (u64)sas_device->sas_address,
211                                  sas_device->phy);
212                         ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
213                                  (u64)sas_device->enclosure_logical_id,
214                                  sas_device->slot);
215                         sas_device_put(sas_device);
216                 }
217                 if (!sas_device) {
218                         pcie_device = mpt3sas_get_pdev_by_handle(ioc,
219                                 le16_to_cpu(scsi_reply->DevHandle));
220                         if (pcie_device) {
221                                 ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
222                                          (unsigned long long)pcie_device->wwid,
223                                          pcie_device->port_num);
224                                 if (pcie_device->enclosure_handle != 0)
225                                         ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
226                                                  (u64)pcie_device->enclosure_logical_id,
227                                                  pcie_device->slot);
228                                 pcie_device_put(pcie_device);
229                         }
230                 }
231                 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
232                         ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
233                                  scsi_reply->SCSIState,
234                                  scsi_reply->SCSIStatus);
235         }
236 }
237
238 /**
239  * mpt3sas_ctl_done - ctl module completion routine
240  * @ioc: per adapter object
241  * @smid: system request message index
242  * @msix_index: MSIX table index supplied by the OS
243  * @reply: reply message frame (lower 32-bit addr)
244  * Context: none.
245  *
246  * The callback handler when using ioc->ctl_cb_idx.
247  *
248  * Return: 1 if the mf should be freed by _base_interrupt,
249  *         0 if the mf is freed by this function.
250  */
251 u8
252 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
253         u32 reply)
254 {
255         MPI2DefaultReply_t *mpi_reply;
256         Mpi2SCSIIOReply_t *scsiio_reply;
257         Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
258         const void *sense_data;
259         u32 sz;
260
261         if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
262                 return 1;
263         if (ioc->ctl_cmds.smid != smid)
264                 return 1;
265         ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
266         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
267         if (mpi_reply) {
268                 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
269                 ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
270                 /* get sense data */
271                 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
272                     mpi_reply->Function ==
273                     MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
274                         scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
275                         if (scsiio_reply->SCSIState &
276                             MPI2_SCSI_STATE_AUTOSENSE_VALID) {
277                                 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
278                                     le32_to_cpu(scsiio_reply->SenseCount));
279                                 sense_data = mpt3sas_base_get_sense_buffer(ioc,
280                                     smid);
281                                 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
282                         }
283                 }
284                 /*
285                  * Get Error Response data for NVMe device. The ctl_cmds.sense
286                  * buffer is used to store the Error Response data.
287                  */
288                 if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
289                         nvme_error_reply =
290                             (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
291                         sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
292                             le16_to_cpu(nvme_error_reply->ErrorResponseCount));
293                         sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
294                         memcpy(ioc->ctl_cmds.sense, sense_data, sz);
295                 }
296         }
297
298         _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
299         ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
300         complete(&ioc->ctl_cmds.done);
301         return 1;
302 }
303
304 /**
305  * _ctl_check_event_type - determines when an event needs logging
306  * @ioc: per adapter object
307  * @event: firmware event
308  *
309  * The bitmask in ioc->event_type[] indicates which events should be
310  * saved in the driver event_log.  This bitmask is set by the application.
311  *
312  * Return: 1 when the event should be captured, or 0 when it does not match.
313  */
314 static int
315 _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
316 {
317         u16 i;
318         u32 desired_event;
319
320         if (event >= 128 || !event || !ioc->event_log)
321                 return 0;
322
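            /* ioc->event_type[] is an array of 32-bit mask words: bit
             * (event % 32) of word (event / 32) decides whether this
             * event gets logged.
             */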
323         desired_event = (1 << (event % 32));
324         if (!desired_event)
325                 desired_event = 1;
326         i = event / 32;
327         return desired_event & ioc->event_type[i];
328 }
329
330 /**
331  * mpt3sas_ctl_add_to_event_log - add event
332  * @ioc: per adapter object
333  * @mpi_reply: reply message frame
334  */
335 void
336 mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
337         Mpi2EventNotificationReply_t *mpi_reply)
338 {
339         struct MPT3_IOCTL_EVENTS *event_log;
340         u16 event;
341         int i;
342         u32 sz, event_data_sz;
343         u8 send_aen = 0;
344
345         if (!ioc->event_log)
346                 return;
347
348         event = le16_to_cpu(mpi_reply->Event);
349
350         if (_ctl_check_event_type(ioc, event)) {
351
352                 /* insert entry into circular event_log */
353                 i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
354                 event_log = ioc->event_log;
355                 event_log[i].event = event;
356                 event_log[i].context = ioc->event_context++;
357
358                 event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
359                 sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
360                 memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
361                 memcpy(event_log[i].data, mpi_reply->EventData, sz);
362                 send_aen = 1;
363         }
364
365         /* The aen_event_read_flag stays set until the
366          * application has read the event log.
367          * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
368          */
369         if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
370             (send_aen && !ioc->aen_event_read_flag)) {
371                 ioc->aen_event_read_flag = 1;
372                 wake_up_interruptible(&ctl_poll_wait);
373                 if (async_queue)
374                         kill_fasync(&async_queue, SIGIO, POLL_IN);
375         }
376 }
377
378 /**
379  * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
380  * @ioc: per adapter object
381  * @msix_index: MSIX table index supplied by the OS
382  * @reply: reply message frame (lower 32-bit addr)
383  * Context: interrupt.
384  *
385  * This function merely copies the event into the driver's event log
386  * via mpt3sas_ctl_add_to_event_log() and wakes any waiting readers.
387  *
388  * Return: 1 if the mf should be freed by _base_interrupt,
389  *         0 if the mf is freed by this function.
390  */
391 u8
392 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
393         u32 reply)
394 {
395         Mpi2EventNotificationReply_t *mpi_reply;
396
397         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
398         if (mpi_reply)
399                 mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
400         return 1;
401 }
402
403 /**
404  * _ctl_verify_adapter - validates ioc_number passed from application
405  * @ioc_number: ioc unit number requested by the application
406  * @iocpp: The ioc pointer is returned in this.
407  * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
408  * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
409  *
410  * Return: (-1) means error, else ioc_number.
411  */
412 static int
413 _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
414                                                         int mpi_version)
415 {
416         struct MPT3SAS_ADAPTER *ioc;
417         int version = 0;
418         /* global ioc lock to protect controller on list operations */
419         spin_lock(&gioc_lock);
420         list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
421                 if (ioc->id != ioc_number)
422                         continue;
423                 /* Check whether this ioctl command came from the right
424                  * ioctl device; if not, continue the search.
425                  */
426                 version = ioc->hba_mpi_version_belonged;
427                 /* MPI25_VERSION and MPI26_VERSION use the same ioctl
428                  * device.
429                  */
430                 if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
431                         if ((version == MPI25_VERSION) ||
432                                 (version == MPI26_VERSION))
433                                 goto out;
434                         else
435                                 continue;
436                 } else {
437                         if (version != mpi_version)
438                                 continue;
439                 }
440 out:
441                 spin_unlock(&gioc_lock);
442                 *iocpp = ioc;
443                 return ioc_number;
444         }
445         spin_unlock(&gioc_lock);
446         *iocpp = NULL;
447         return -1;
448 }
449
450 /**
451  * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
452  * @ioc: per adapter object
453  *
454  * The handler for doing any required cleanup or initialization.
455  */
456 void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
457 {
458         int i;
459         u8 issue_reset;
460
461         dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
462         for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
463                 if (!(ioc->diag_buffer_status[i] &
464                       MPT3_DIAG_BUFFER_IS_REGISTERED))
465                         continue;
466                 if ((ioc->diag_buffer_status[i] &
467                      MPT3_DIAG_BUFFER_IS_RELEASED))
468                         continue;
469                 mpt3sas_send_diag_release(ioc, i, &issue_reset);
470         }
471 }
472
473 /**
474  * mpt3sas_ctl_after_reset_handler - reset callback handler (for ctl)
475  * @ioc: per adapter object
476  *
477  * The handler for doing any required cleanup or initialization.
478  */
479 void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
480 {
481         dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
482         if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
483                 ioc->ctl_cmds.status |= MPT3_CMD_RESET;
484                 mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
485                 complete(&ioc->ctl_cmds.done);
486         }
487 }
488
489 /**
490  * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
491  * @ioc: per adapter object
492  *
493  * The handler for doing any required cleanup or initialization.
494  */
495 void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
496 {
497         int i;
498
499         dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
500
501         for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
502                 if (!(ioc->diag_buffer_status[i] &
503                       MPT3_DIAG_BUFFER_IS_REGISTERED))
504                         continue;
505                 if ((ioc->diag_buffer_status[i] &
506                      MPT3_DIAG_BUFFER_IS_RELEASED))
507                         continue;
508                 ioc->diag_buffer_status[i] |=
509                         MPT3_DIAG_BUFFER_IS_DIAG_RESET;
510         }
511 }
512
513 /**
514  * _ctl_fasync - fasync handler for the ioctl character device
515  * @fd: file descriptor
516  * @filep: file handle
517  * @mode: fasync on/off flag
518  *
519  * Called when an application requests the fasync callback handler.
520  */
521 static int
522 _ctl_fasync(int fd, struct file *filep, int mode)
523 {
524         return fasync_helper(fd, filep, mode, &async_queue);
525 }
526
527 /**
528  * _ctl_poll - poll handler for the ioctl character device
529  * @filep: file handle
530  * @wait: poll table
531  * Return: EPOLLIN | EPOLLRDNORM when an AEN event is pending, else 0.
532  */
533 static __poll_t
534 _ctl_poll(struct file *filep, poll_table *wait)
535 {
536         struct MPT3SAS_ADAPTER *ioc;
537
538         poll_wait(filep, &ctl_poll_wait, wait);
539
540         /* global ioc lock to protect controller on list operations */
541         spin_lock(&gioc_lock);
542         list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
543                 if (ioc->aen_event_read_flag) {
544                         spin_unlock(&gioc_lock);
545                         return EPOLLIN | EPOLLRDNORM;
546                 }
547         }
548         spin_unlock(&gioc_lock);
549         return 0;
550 }
551
552 /**
553  * _ctl_set_task_mid - assign an active smid to tm request
554  * @ioc: per adapter object
555  * @karg: (struct mpt3_ioctl_command)
556  * @tm_request: pointer to mf from user space
557  *
558  * Return: 0 when an smid is found, else fail;
559  * on failure, the reply frame is filled.
560  */
561 static int
562 _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
563         Mpi2SCSITaskManagementRequest_t *tm_request)
564 {
565         u8 found = 0;
566         u16 smid;
567         u16 handle;
568         struct scsi_cmnd *scmd;
569         struct MPT3SAS_DEVICE *priv_data;
570         Mpi2SCSITaskManagementReply_t *tm_reply;
571         u32 sz;
572         u32 lun;
573         char *desc = NULL;
574
575         if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
576                 desc = "abort_task";
577         else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
578                 desc = "query_task";
579         else
580                 return 0;
581
582         lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
583
584         handle = le16_to_cpu(tm_request->DevHandle);
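            /* walk every outstanding SCSI I/O smid and match the command
             * by device handle and lun; its smid becomes the TaskMID.
             */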
585         for (smid = ioc->scsiio_depth; smid && !found; smid--) {
586                 struct scsiio_tracker *st;
587
588                 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
589                 if (!scmd)
590                         continue;
591                 if (lun != scmd->device->lun)
592                         continue;
593                 priv_data = scmd->device->hostdata;
594                 if (priv_data->sas_target == NULL)
595                         continue;
596                 if (priv_data->sas_target->handle != handle)
597                         continue;
598                 st = scsi_cmd_priv(scmd);
599                 tm_request->TaskMID = cpu_to_le16(st->smid);
600                 found = 1;
601         }
602
603         if (!found) {
604                 dctlprintk(ioc,
605                            ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
606                                     desc, le16_to_cpu(tm_request->DevHandle),
607                                     lun));
608                 tm_reply = ioc->ctl_cmds.reply;
609                 tm_reply->DevHandle = tm_request->DevHandle;
610                 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
611                 tm_reply->TaskType = tm_request->TaskType;
612                 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
613                 tm_reply->VP_ID = tm_request->VP_ID;
614                 tm_reply->VF_ID = tm_request->VF_ID;
615                 sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
616                 if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
617                     sz))
618                         pr_err("failure at %s:%d/%s()!\n", __FILE__,
619                             __LINE__, __func__);
620                 return 1;
621         }
622
623         dctlprintk(ioc,
624                    ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
625                             desc, le16_to_cpu(tm_request->DevHandle), lun,
626                             le16_to_cpu(tm_request->TaskMID)));
627         return 0;
628 }
629
630 /**
631  * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
632  * @ioc: per adapter object
633  * @karg: (struct mpt3_ioctl_command)
634  * @mf: pointer to mf in user space
635  */
636 static long
637 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
638         void __user *mf)
639 {
640         MPI2RequestHeader_t *mpi_request = NULL, *request;
641         MPI2DefaultReply_t *mpi_reply;
642         Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
643         struct _pcie_device *pcie_device = NULL;
644         u32 ioc_state;
645         u16 smid;
646         u8 timeout;
647         u8 issue_reset;
648         u32 sz, sz_arg;
649         void *psge;
650         void *data_out = NULL;
651         dma_addr_t data_out_dma = 0;
652         size_t data_out_sz = 0;
653         void *data_in = NULL;
654         dma_addr_t data_in_dma = 0;
655         size_t data_in_sz = 0;
656         long ret;
657         u16 wait_state_count;
658         u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
659         u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
660
661         issue_reset = 0;
662
663         if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
664                 ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
665                 ret = -EAGAIN;
666                 goto out;
667         }
668
669         wait_state_count = 0;
670         ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
671         while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
672                 if (wait_state_count++ == 10) {
673                         ioc_err(ioc, "%s: failed due to ioc not operational\n",
674                                 __func__);
675                         ret = -EFAULT;
676                         goto out;
677                 }
678                 ssleep(1);
679                 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
680                 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
681                          __func__, wait_state_count);
682         }
683         if (wait_state_count)
684                 ioc_info(ioc, "%s: ioc is operational\n", __func__);
685
686         mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
687         if (!mpi_request) {
688                 ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n",
689                         __func__);
690                 ret = -ENOMEM;
691                 goto out;
692         }
693
694         /* Check for overflow and wraparound */
695         if (karg.data_sge_offset * 4 > ioc->request_sz ||
696             karg.data_sge_offset > (UINT_MAX / 4)) {
697                 ret = -EINVAL;
698                 goto out;
699         }
700
701         /* copy in request message frame from user */
702         if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
703                 pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
704                     __func__);
705                 ret = -EFAULT;
706                 goto out;
707         }
708
709         if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
710                 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
711                 if (!smid) {
712                         ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
713                         ret = -EAGAIN;
714                         goto out;
715                 }
716         } else {
717                 /* Use first reserved smid for passthrough ioctls */
718                 smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
719         }
720
721         ret = 0;
722         ioc->ctl_cmds.status = MPT3_CMD_PENDING;
723         memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
724         request = mpt3sas_base_get_msg_frame(ioc, smid);
725         memcpy(request, mpi_request, karg.data_sge_offset*4);
726         ioc->ctl_cmds.smid = smid;
727         data_out_sz = karg.data_out_size;
728         data_in_sz = karg.data_in_size;
729
730         if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
731             mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
732             mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
733             mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
734             mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
735
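                    /* for these request types, FunctionDependent1 carries
                     * the target device handle; validate it against the
                     * firmware's MaxDevHandle.
                     */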
736                 device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
737                 if (!device_handle || (device_handle >
738                     ioc->facts.MaxDevHandle)) {
739                         ret = -EINVAL;
740                         mpt3sas_base_free_smid(ioc, smid);
741                         goto out;
742                 }
743         }
744
745         /* obtain dma-able memory for data transfer */
746         if (data_out_sz) /* WRITE */ {
747                 data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
748                                 &data_out_dma, GFP_KERNEL);
749                 if (!data_out) {
750                         pr_err("failure at %s:%d/%s()!\n", __FILE__,
751                             __LINE__, __func__);
752                         ret = -ENOMEM;
753                         mpt3sas_base_free_smid(ioc, smid);
754                         goto out;
755                 }
756                 if (copy_from_user(data_out, karg.data_out_buf_ptr,
757                         data_out_sz)) {
758                         pr_err("failure at %s:%d/%s()!\n", __FILE__,
759                             __LINE__, __func__);
760                         ret = -EFAULT;
761                         mpt3sas_base_free_smid(ioc, smid);
762                         goto out;
763                 }
764         }
765
766         if (data_in_sz) /* READ */ {
767                 data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
768                                 &data_in_dma, GFP_KERNEL);
769                 if (!data_in) {
770                         pr_err("failure at %s:%d/%s()!\n", __FILE__,
771                             __LINE__, __func__);
772                         ret = -ENOMEM;
773                         mpt3sas_base_free_smid(ioc, smid);
774                         goto out;
775                 }
776         }
777
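            /* data_sge_offset is in 4-byte words, so the SGE/PRP portion of
             * the request frame begins at this byte offset.
             */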
778         psge = (void *)request + (karg.data_sge_offset*4);
779
780         /* send command to firmware */
781         _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
782
783         init_completion(&ioc->ctl_cmds.done);
784         switch (mpi_request->Function) {
785         case MPI2_FUNCTION_NVME_ENCAPSULATED:
786         {
787                 nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
788                 /*
789                  * Get the Physical Address of the sense buffer.
790                  * Use Error Response buffer address field to hold the sense
791                  * buffer address.
792                  * Clear the internal sense buffer, which will potentially hold
793                  * the Completion Queue Entry on return, or 0 if no Entry.
794                  * Build the PRPs and set direction bits.
795                  * Send the request.
796                  */
797                 nvme_encap_request->ErrorResponseBaseAddress =
798                     cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
799                 nvme_encap_request->ErrorResponseBaseAddress |=
800                    cpu_to_le64(le32_to_cpu(
801                    mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
802                 nvme_encap_request->ErrorResponseAllocationLength =
803                                         cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
804                 memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
805                 ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
806                     data_out_dma, data_out_sz, data_in_dma, data_in_sz);
807                 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
808                         dtmprintk(ioc,
809                                   ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
810                                            device_handle));
811                         mpt3sas_base_free_smid(ioc, smid);
812                         ret = -EINVAL;
813                         goto out;
814                 }
815                 mpt3sas_base_put_smid_nvme_encap(ioc, smid);
816                 break;
817         }
818         case MPI2_FUNCTION_SCSI_IO_REQUEST:
819         case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
820         {
821                 Mpi2SCSIIORequest_t *scsiio_request =
822                     (Mpi2SCSIIORequest_t *)request;
823                 scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
824                 scsiio_request->SenseBufferLowAddress =
825                     mpt3sas_base_get_sense_buffer_dma(ioc, smid);
826                 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
827                 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
828                         dtmprintk(ioc,
829                                   ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
830                                            device_handle));
831                         mpt3sas_base_free_smid(ioc, smid);
832                         ret = -EINVAL;
833                         goto out;
834                 }
835                 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
836                     data_in_dma, data_in_sz);
837                 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
838                         ioc->put_smid_scsi_io(ioc, smid, device_handle);
839                 else
840                         mpt3sas_base_put_smid_default(ioc, smid);
841                 break;
842         }
843         case MPI2_FUNCTION_SCSI_TASK_MGMT:
844         {
845                 Mpi2SCSITaskManagementRequest_t *tm_request =
846                     (Mpi2SCSITaskManagementRequest_t *)request;
847
848                 dtmprintk(ioc,
849                           ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
850                                    le16_to_cpu(tm_request->DevHandle),
851                                    tm_request->TaskType));
852                 ioc->got_task_abort_from_ioctl = 1;
853                 if (tm_request->TaskType ==
854                     MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
855                     tm_request->TaskType ==
856                     MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
857                         if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
858                                 mpt3sas_base_free_smid(ioc, smid);
859                                 ioc->got_task_abort_from_ioctl = 0;
860                                 goto out;
861                         }
862                 }
863                 ioc->got_task_abort_from_ioctl = 0;
864
865                 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
866                         dtmprintk(ioc,
867                                   ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
868                                            device_handle));
869                         mpt3sas_base_free_smid(ioc, smid);
870                         ret = -EINVAL;
871                         goto out;
872                 }
873                 mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
874                     tm_request->DevHandle));
875                 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
876                     data_in_dma, data_in_sz);
877                 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
878                 break;
879         }
880         case MPI2_FUNCTION_SMP_PASSTHROUGH:
881         {
882                 Mpi2SmpPassthroughRequest_t *smp_request =
883                     (Mpi2SmpPassthroughRequest_t *)mpi_request;
884                 u8 *data;
885
886                 /* ioc determines which port to use */
887                 smp_request->PhysicalPort = 0xFF;
888                 if (smp_request->PassthroughFlags &
889                     MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
890                         data = (u8 *)&smp_request->SGL;
891                 else {
892                         if (unlikely(data_out == NULL)) {
893                                 pr_err("failure at %s:%d/%s()!\n",
894                                     __FILE__, __LINE__, __func__);
895                                 mpt3sas_base_free_smid(ioc, smid);
896                                 ret = -EINVAL;
897                                 goto out;
898                         }
899                         data = data_out;
900                 }
901
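                    /* SMP PHY CONTROL (function 0x91) with a LINK RESET (1)
                     * or HARD RESET (2) phy operation resets the link, so
                     * flag the link reset and ignore loginfos meanwhile.
                     */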
902                 if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
903                         ioc->ioc_link_reset_in_progress = 1;
904                         ioc->ignore_loginfos = 1;
905                 }
906                 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
907                     data_in_sz);
908                 mpt3sas_base_put_smid_default(ioc, smid);
909                 break;
910         }
911         case MPI2_FUNCTION_SATA_PASSTHROUGH:
912         {
913                 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
914                         dtmprintk(ioc,
915                                   ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
916                                            device_handle));
917                         mpt3sas_base_free_smid(ioc, smid);
918                         ret = -EINVAL;
919                         goto out;
920                 }
921                 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
922                     data_in_sz);
923                 mpt3sas_base_put_smid_default(ioc, smid);
924                 break;
925         }
926         case MPI2_FUNCTION_FW_DOWNLOAD:
927         case MPI2_FUNCTION_FW_UPLOAD:
928         {
929                 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
930                     data_in_sz);
931                 mpt3sas_base_put_smid_default(ioc, smid);
932                 break;
933         }
934         case MPI2_FUNCTION_TOOLBOX:
935         {
936                 Mpi2ToolboxCleanRequest_t *toolbox_request =
937                         (Mpi2ToolboxCleanRequest_t *)mpi_request;
938
939                 if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
940                         ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
941                                 data_in_dma, data_in_sz);
942                 } else {
943                         ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
944                                 data_in_dma, data_in_sz);
945                 }
946                 mpt3sas_base_put_smid_default(ioc, smid);
947                 break;
948         }
949         case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
950         {
951                 Mpi2SasIoUnitControlRequest_t *sasiounit_request =
952                     (Mpi2SasIoUnitControlRequest_t *)mpi_request;
953
954                 if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
955                     || sasiounit_request->Operation ==
956                     MPI2_SAS_OP_PHY_LINK_RESET) {
957                         ioc->ioc_link_reset_in_progress = 1;
958                         ioc->ignore_loginfos = 1;
959                 }
960                 /* drop to default case for posting the request */
961         }
962                 /* fall through */
963         default:
964                 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
965                     data_in_dma, data_in_sz);
966                 mpt3sas_base_put_smid_default(ioc, smid);
967                 break;
968         }
969
970         if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
971                 timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
972         else
973                 timeout = karg.timeout;
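            /* karg.timeout is in seconds; convert to jiffies for the wait */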
974         wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
975         if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
976                 Mpi2SCSITaskManagementRequest_t *tm_request =
977                     (Mpi2SCSITaskManagementRequest_t *)mpi_request;
978                 mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
979                     tm_request->DevHandle));
980                 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
981         } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
982             mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
983                 ioc->ioc_link_reset_in_progress) {
984                 ioc->ioc_link_reset_in_progress = 0;
985                 ioc->ignore_loginfos = 0;
986         }
987         if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
988                 issue_reset =
989                         mpt3sas_base_check_cmd_timeout(ioc,
990                                 ioc->ctl_cmds.status, mpi_request,
991                                 karg.data_sge_offset);
992                 goto issue_host_reset;
993         }
994
995         mpi_reply = ioc->ctl_cmds.reply;
996
997         if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
998             (ioc->logging_level & MPT_DEBUG_TM)) {
999                 Mpi2SCSITaskManagementReply_t *tm_reply =
1000                     (Mpi2SCSITaskManagementReply_t *)mpi_reply;
1001
1002                 ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
1003                          le16_to_cpu(tm_reply->IOCStatus),
1004                          le32_to_cpu(tm_reply->IOCLogInfo),
1005                          le32_to_cpu(tm_reply->TerminationCount));
1006         }
1007
1008         /* copy out xdata to user */
1009         if (data_in_sz) {
1010                 if (copy_to_user(karg.data_in_buf_ptr, data_in,
1011                     data_in_sz)) {
1012                         pr_err("failure at %s:%d/%s()!\n", __FILE__,
1013                             __LINE__, __func__);
1014                         ret = -ENODATA;
1015                         goto out;
1016                 }
1017         }
1018
1019         /* copy out reply message frame to user */
1020         if (karg.max_reply_bytes) {
1021                 sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
1022                 if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
1023                     sz)) {
1024                         pr_err("failure at %s:%d/%s()!\n", __FILE__,
1025                             __LINE__, __func__);
1026                         ret = -ENODATA;
1027                         goto out;
1028                 }
1029         }
1030
1031         /* copy out sense/NVMe Error Response to user */
1032         if (karg.max_sense_bytes && (mpi_request->Function ==
1033             MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
1034             MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
1035             MPI2_FUNCTION_NVME_ENCAPSULATED)) {
1036                 if (karg.sense_data_ptr == NULL) {
1037                         ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
1038                         goto out;
1039                 }
1040                 sz_arg = (mpi_request->Function ==
1041                 MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
1042                                                         SCSI_SENSE_BUFFERSIZE;
1043                 sz = min_t(u32, karg.max_sense_bytes, sz_arg);
1044                 if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
1045                     sz)) {
1046                         pr_err("failure at %s:%d/%s()!\n", __FILE__,
1047                                 __LINE__, __func__);
1048                         ret = -ENODATA;
1049                         goto out;
1050                 }
1051         }
1052
1053  issue_host_reset:
1054         if (issue_reset) {
1055                 ret = -ENODATA;
1056                 if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
1057                     mpi_request->Function ==
1058                     MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
1059                     mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
1060                         ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
1061                                  le16_to_cpu(mpi_request->FunctionDependent1));
1062                         mpt3sas_halt_firmware(ioc);
1063                         pcie_device = mpt3sas_get_pdev_by_handle(ioc,
1064                                 le16_to_cpu(mpi_request->FunctionDependent1));
1065                         if (pcie_device && (!ioc->tm_custom_handling))
1066                                 mpt3sas_scsih_issue_locked_tm(ioc,
1067                                   le16_to_cpu(mpi_request->FunctionDependent1),
1068                                   0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1069                                   0, pcie_device->reset_timeout,
1070                                   tr_method);
1071                         else
1072                                 mpt3sas_scsih_issue_locked_tm(ioc,
1073                                   le16_to_cpu(mpi_request->FunctionDependent1),
1074                                   0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1075                                   0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
1076                 } else
1077                         mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1078         }
1079
1080  out:
1081         if (pcie_device)
1082                 pcie_device_put(pcie_device);
1083
1084         /* free memory associated with sg buffers */
1085         if (data_in)
1086                 dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
1087                     data_in_dma);
1088
1089         if (data_out)
1090                 dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
1091                     data_out_dma);
1092
1093         kfree(mpi_request);
1094         ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1095         return ret;
1096 }
1097
1098 /**
1099  * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
1100  * @ioc: per adapter object
1101  * @arg: user space buffer containing ioctl content
1102  */
1103 static long
1104 _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1105 {
1106         struct mpt3_ioctl_iocinfo karg;
1107
1108         dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1109                                  __func__));
1110
1111         memset(&karg, 0, sizeof(karg));
1112         if (ioc->pfacts)
1113                 karg.port_number = ioc->pfacts[0].PortNumber;
1114         karg.hw_rev = ioc->pdev->revision;
1115         karg.pci_id = ioc->pdev->device;
1116         karg.subsystem_device = ioc->pdev->subsystem_device;
1117         karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
1118         karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
1119         karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
1120         karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
1121         karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
1122         karg.firmware_version = ioc->facts.FWVersion.Word;
1123         strcpy(karg.driver_version, ioc->driver_name);
1124         strcat(karg.driver_version, "-");
1125         switch  (ioc->hba_mpi_version_belonged) {
1126         case MPI2_VERSION:
1127                 if (ioc->is_warpdrive)
1128                         karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
1129                 else
1130                         karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1131                 strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
1132                 break;
1133         case MPI25_VERSION:
1134         case MPI26_VERSION:
1135                 if (ioc->is_gen35_ioc)
1136                         karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
1137                 else
1138                         karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
1139                 strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
1140                 break;
1141         }
1142         karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
1143
1144         if (copy_to_user(arg, &karg, sizeof(karg))) {
1145                 pr_err("failure at %s:%d/%s()!\n",
1146                     __FILE__, __LINE__, __func__);
1147                 return -EFAULT;
1148         }
1149         return 0;
1150 }
1151
1152 /**
1153  * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
1154  * @ioc: per adapter object
1155  * @arg: user space buffer containing ioctl content
1156  */
1157 static long
1158 _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1159 {
1160         struct mpt3_ioctl_eventquery karg;
1161
1162         if (copy_from_user(&karg, arg, sizeof(karg))) {
1163                 pr_err("failure at %s:%d/%s()!\n",
1164                     __FILE__, __LINE__, __func__);
1165                 return -EFAULT;
1166         }
1167
1168         dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1169                                  __func__));
1170
1171         karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
1172         memcpy(karg.event_types, ioc->event_type,
1173             MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1174
1175         if (copy_to_user(arg, &karg, sizeof(karg))) {
1176                 pr_err("failure at %s:%d/%s()!\n",
1177                     __FILE__, __LINE__, __func__);
1178                 return -EFAULT;
1179         }
1180         return 0;
1181 }
1182
1183 /**
1184  * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
1185  * @ioc: per adapter object
1186  * @arg: user space buffer containing ioctl content
1187  */
1188 static long
1189 _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1190 {
1191         struct mpt3_ioctl_eventenable karg;
1192
1193         if (copy_from_user(&karg, arg, sizeof(karg))) {
1194                 pr_err("failure at %s:%d/%s()!\n",
1195                     __FILE__, __LINE__, __func__);
1196                 return -EFAULT;
1197         }
1198
1199         dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1200                                  __func__));
1201
1202         memcpy(ioc->event_type, karg.event_types,
1203             MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1204         mpt3sas_base_validate_event_type(ioc, ioc->event_type);
1205
1206         if (ioc->event_log)
1207                 return 0;
1208         /* initialize event_log */
1209         ioc->event_context = 0;
1210         ioc->aen_event_read_flag = 0;
1211         ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
1212             sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
1213         if (!ioc->event_log) {
1214                 pr_err("failure at %s:%d/%s()!\n",
1215                     __FILE__, __LINE__, __func__);
1216                 return -ENOMEM;
1217         }
1218         return 0;
1219 }
1220
1221 /**
1222  * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
1223  * @ioc: per adapter object
1224  * @arg: user space buffer containing ioctl content
1225  */
1226 static long
1227 _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1228 {
1229         struct mpt3_ioctl_eventreport karg;
1230         u32 number_bytes, max_events, max;
1231         struct mpt3_ioctl_eventreport __user *uarg = arg;
1232
1233         if (copy_from_user(&karg, arg, sizeof(karg))) {
1234                 pr_err("failure at %s:%d/%s()!\n",
1235                     __FILE__, __LINE__, __func__);
1236                 return -EFAULT;
1237         }
1238
1239         dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1240                                  __func__));
1241
1242         number_bytes = karg.hdr.max_data_size -
1243             sizeof(struct mpt3_ioctl_header);
1244         max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
1245         max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
1246
1247         /* If fewer than 1 event is requested, there must have
1248          * been some type of error.
1249          */
1250         if (!max || !ioc->event_log)
1251                 return -ENODATA;
1252
1253         number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
1254         if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
1255                 pr_err("failure at %s:%d/%s()!\n",
1256                     __FILE__, __LINE__, __func__);
1257                 return -EFAULT;
1258         }
1259
1260         /* reset flag so SIGIO can restart */
1261         ioc->aen_event_read_flag = 0;
1262         return 0;
1263 }
1264
1265 /**
1266  * _ctl_do_reset - main handler for MPT3HARDRESET opcode
1267  * @ioc: per adapter object
1268  * @arg: user space buffer containing ioctl content
1269  */
1270 static long
1271 _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1272 {
1273         struct mpt3_ioctl_diag_reset karg;
1274         int retval;
1275
1276         if (copy_from_user(&karg, arg, sizeof(karg))) {
1277                 pr_err("failure at %s:%d/%s()!\n",
1278                     __FILE__, __LINE__, __func__);
1279                 return -EFAULT;
1280         }
1281
1282         if (ioc->shost_recovery || ioc->pci_error_recovery ||
1283             ioc->is_driver_loading)
1284                 return -EAGAIN;
1285
1286         dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1287                                  __func__));
1288
1289         retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1290         ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
1291         return 0;
1292 }
1293
1294 /**
1295  * _ctl_btdh_search_sas_device - searching for sas device
1296  * @ioc: per adapter object
1297  * @btdh: btdh ioctl payload
1298  */
1299 static int
1300 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
1301         struct mpt3_ioctl_btdh_mapping *btdh)
1302 {
1303         struct _sas_device *sas_device;
1304         unsigned long flags;
1305         int rc = 0;
1306
1307         if (list_empty(&ioc->sas_device_list))
1308                 return rc;
1309
1310         spin_lock_irqsave(&ioc->sas_device_lock, flags);
1311         list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
1312                 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1313                     btdh->handle == sas_device->handle) {
1314                         btdh->bus = sas_device->channel;
1315                         btdh->id = sas_device->id;
1316                         rc = 1;
1317                         goto out;
1318                 } else if (btdh->bus == sas_device->channel && btdh->id ==
1319                     sas_device->id && btdh->handle == 0xFFFF) {
1320                         btdh->handle = sas_device->handle;
1321                         rc = 1;
1322                         goto out;
1323                 }
1324         }
1325  out:
1326         spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1327         return rc;
1328 }
1329
1330 /**
1331  * _ctl_btdh_search_pcie_device - searching for pcie device
1332  * @ioc: per adapter object
1333  * @btdh: btdh ioctl payload
1334  */
1335 static int
1336 _ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
1337         struct mpt3_ioctl_btdh_mapping *btdh)
1338 {
1339         struct _pcie_device *pcie_device;
1340         unsigned long flags;
1341         int rc = 0;
1342
1343         if (list_empty(&ioc->pcie_device_list))
1344                 return rc;
1345
1346         spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1347         list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1348                 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1349                            btdh->handle == pcie_device->handle) {
1350                         btdh->bus = pcie_device->channel;
1351                         btdh->id = pcie_device->id;
1352                         rc = 1;
1353                         goto out;
1354                 } else if (btdh->bus == pcie_device->channel && btdh->id ==
1355                            pcie_device->id && btdh->handle == 0xFFFF) {
1356                         btdh->handle = pcie_device->handle;
1357                         rc = 1;
1358                         goto out;
1359                 }
1360         }
1361  out:
1362         spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1363         return rc;
1364 }
1365
1366 /**
1367  * _ctl_btdh_search_raid_device - searching for raid device
1368  * @ioc: per adapter object
1369  * @btdh: btdh ioctl payload
1370  */
1371 static int
1372 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
1373         struct mpt3_ioctl_btdh_mapping *btdh)
1374 {
1375         struct _raid_device *raid_device;
1376         unsigned long flags;
1377         int rc = 0;
1378
1379         if (list_empty(&ioc->raid_device_list))
1380                 return rc;
1381
1382         spin_lock_irqsave(&ioc->raid_device_lock, flags);
1383         list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1384                 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1385                     btdh->handle == raid_device->handle) {
1386                         btdh->bus = raid_device->channel;
1387                         btdh->id = raid_device->id;
1388                         rc = 1;
1389                         goto out;
1390                 } else if (btdh->bus == raid_device->channel && btdh->id ==
1391                     raid_device->id && btdh->handle == 0xFFFF) {
1392                         btdh->handle = raid_device->handle;
1393                         rc = 1;
1394                         goto out;
1395                 }
1396         }
1397  out:
1398         spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1399         return rc;
1400 }
1401
1402 /**
1403  * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
1404  * @ioc: per adapter object
1405  * @arg: user space buffer containing ioctl content
1406  */
1407 static long
1408 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1409 {
1410         struct mpt3_ioctl_btdh_mapping karg;
1411         int rc;
1412
1413         if (copy_from_user(&karg, arg, sizeof(karg))) {
1414                 pr_err("failure at %s:%d/%s()!\n",
1415                     __FILE__, __LINE__, __func__);
1416                 return -EFAULT;
1417         }
1418
1419         dctlprintk(ioc, ioc_info(ioc, "%s\n",
1420                                  __func__));
1421
1422         rc = _ctl_btdh_search_sas_device(ioc, &karg);
1423         if (!rc)
1424                 rc = _ctl_btdh_search_pcie_device(ioc, &karg);
1425         if (!rc)
1426                 _ctl_btdh_search_raid_device(ioc, &karg);
1427
1428         if (copy_to_user(arg, &karg, sizeof(karg))) {
1429                 pr_err("failure at %s:%d/%s()!\n",
1430                     __FILE__, __LINE__, __func__);
1431                 return -EFAULT;
1432         }
1433         return 0;
1434 }
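
/*
 * Illustrative sketch (not part of the driver): resolving a firmware device
 * handle to a bus/target pair via MPT3BTDHMAPPING.  Per the search helpers
 * above, bus/id set to 0xFFFFFFFF means "look up by handle", while a handle
 * of 0xFFFF means "look up by bus/id".  'fd' and 'handle' are assumed to be
 * set up by the caller.
 *
 *	struct mpt3_ioctl_btdh_mapping karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.hdr.ioc_number = 0;
 *	karg.hdr.max_data_size = sizeof(karg);
 *	karg.bus = 0xFFFFFFFF;		// wildcard: search by handle
 *	karg.id = 0xFFFFFFFF;
 *	karg.handle = handle;		// firmware device handle to resolve
 *	if (!ioctl(fd, MPT3BTDHMAPPING, &karg))
 *		printf("handle 0x%04x -> bus %u target %u\n",
 *		       handle, karg.bus, karg.id);
 */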
1435
1436 /**
1437  * _ctl_diag_capability - return diag buffer capability
1438  * @ioc: per adapter object
1439  * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1440  *
1441  * returns 1 when diag buffer support is enabled in firmware
1442  */
1443 static u8
1444 _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
1445 {
1446         u8 rc = 0;
1447
1448         switch (buffer_type) {
1449         case MPI2_DIAG_BUF_TYPE_TRACE:
1450                 if (ioc->facts.IOCCapabilities &
1451                     MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
1452                         rc = 1;
1453                 break;
1454         case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
1455                 if (ioc->facts.IOCCapabilities &
1456                     MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
1457                         rc = 1;
1458                 break;
1459         case MPI2_DIAG_BUF_TYPE_EXTENDED:
1460                 if (ioc->facts.IOCCapabilities &
1461                     MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
1462                         rc = 1;
1463         }
1464
1465         return rc;
1466 }
1467
1468
1469 /**
1470  * _ctl_diag_register_2 - wrapper for registering diag buffer support
1471  * @ioc: per adapter object
1472  * @diag_register: the diag_register struct passed in from user space
1473  *
1474  */
1475 static long
1476 _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1477         struct mpt3_diag_register *diag_register)
1478 {
1479         int rc, i;
1480         void *request_data = NULL;
1481         dma_addr_t request_data_dma;
1482         u32 request_data_sz = 0;
1483         Mpi2DiagBufferPostRequest_t *mpi_request;
1484         Mpi2DiagBufferPostReply_t *mpi_reply;
1485         u8 buffer_type;
1486         u16 smid;
1487         u16 ioc_status;
1488         u32 ioc_state;
1489         u8 issue_reset = 0;
1490
1491         dctlprintk(ioc, ioc_info(ioc, "%s\n",
1492                                  __func__));
1493
1494         ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1495         if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1496                 ioc_err(ioc, "%s: failed due to ioc not operational\n",
1497                         __func__);
1498                 rc = -EAGAIN;
1499                 goto out;
1500         }
1501
1502         if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1503                 ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
1504                 rc = -EAGAIN;
1505                 goto out;
1506         }
1507
1508         buffer_type = diag_register->buffer_type;
1509         if (!_ctl_diag_capability(ioc, buffer_type)) {
1510                 ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
1511                         __func__, buffer_type);
1512                 return -EPERM;
1513         }
1514
1515         if (ioc->diag_buffer_status[buffer_type] &
1516             MPT3_DIAG_BUFFER_IS_REGISTERED) {
1517                 ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
1518                         __func__, buffer_type);
1519                 return -EINVAL;
1520         }
1521
1522         if (diag_register->requested_buffer_size % 4)  {
1523                 ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
1524                         __func__);
1525                 return -EINVAL;
1526         }
1527
1528         smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1529         if (!smid) {
1530                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
1531                 rc = -EAGAIN;
1532                 goto out;
1533         }
1534
1535         rc = 0;
1536         ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1537         memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1538         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1539         ioc->ctl_cmds.smid = smid;
1540
1541         request_data = ioc->diag_buffer[buffer_type];
1542         request_data_sz = diag_register->requested_buffer_size;
1543         ioc->unique_id[buffer_type] = diag_register->unique_id;
1544         ioc->diag_buffer_status[buffer_type] = 0;
1545         memcpy(ioc->product_specific[buffer_type],
1546             diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
1547         ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
1548
1549         if (request_data) {
1550                 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1551                 if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
1552                         dma_free_coherent(&ioc->pdev->dev,
1553                                         ioc->diag_buffer_sz[buffer_type],
1554                                         request_data, request_data_dma);
1555                         request_data = NULL;
1556                 }
1557         }
1558
1559         if (request_data == NULL) {
1560                 ioc->diag_buffer_sz[buffer_type] = 0;
1561                 ioc->diag_buffer_dma[buffer_type] = 0;
1562                 request_data = dma_alloc_coherent(&ioc->pdev->dev,
1563                                 request_data_sz, &request_data_dma, GFP_KERNEL);
1564                 if (request_data == NULL) {
1565                         ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
1566                                 __func__, request_data_sz);
1567                         mpt3sas_base_free_smid(ioc, smid);
1568                         return -ENOMEM;
1569                 }
1570                 ioc->diag_buffer[buffer_type] = request_data;
1571                 ioc->diag_buffer_sz[buffer_type] = request_data_sz;
1572                 ioc->diag_buffer_dma[buffer_type] = request_data_dma;
1573         }
1574
1575         mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1576         mpi_request->BufferType = diag_register->buffer_type;
1577         mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
1578         mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
1579         mpi_request->BufferLength = cpu_to_le32(request_data_sz);
1580         mpi_request->VF_ID = 0; /* TODO */
1581         mpi_request->VP_ID = 0;
1582
1583         dctlprintk(ioc,
1584                    ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
1585                             __func__, request_data,
1586                             (unsigned long long)request_data_dma,
1587                             le32_to_cpu(mpi_request->BufferLength)));
1588
1589         for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1590                 mpi_request->ProductSpecific[i] =
1591                         cpu_to_le32(ioc->product_specific[buffer_type][i]);
1592
1593         init_completion(&ioc->ctl_cmds.done);
1594         mpt3sas_base_put_smid_default(ioc, smid);
1595         wait_for_completion_timeout(&ioc->ctl_cmds.done,
1596             MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1597
1598         if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1599                 issue_reset =
1600                         mpt3sas_base_check_cmd_timeout(ioc,
1601                                 ioc->ctl_cmds.status, mpi_request,
1602                                 sizeof(Mpi2DiagBufferPostRequest_t)/4);
1603                 goto issue_host_reset;
1604         }
1605
1606         /* process the completed Reply Message Frame */
1607         if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1608                 ioc_err(ioc, "%s: no reply message\n", __func__);
1609                 rc = -EFAULT;
1610                 goto out;
1611         }
1612
1613         mpi_reply = ioc->ctl_cmds.reply;
1614         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1615
1616         if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1617                 ioc->diag_buffer_status[buffer_type] |=
1618                         MPT3_DIAG_BUFFER_IS_REGISTERED;
1619                 dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
1620         } else {
1621                 ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
1622                          __func__,
1623                          ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1624                 rc = -EFAULT;
1625         }
1626
1627  issue_host_reset:
1628         if (issue_reset)
1629                 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1630
1631  out:
1632
1633         if (rc && request_data)
1634                 dma_free_coherent(&ioc->pdev->dev, request_data_sz,
1635                     request_data, request_data_dma);
1636
1637         ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1638         return rc;
1639 }
1640
1641 /**
1642  * mpt3sas_enable_diag_buffer - enable diag buffer support at driver load time
1643  * @ioc: per adapter object
1644  * @bits_to_register: bitwise field where trace is bit 0, snapshot is bit 1, and extended is bit 2
1645  *
1646  * This is called when the command line option diag_buffer_enable is set
1647  * at driver load time.
1648  */
1649 void
1650 mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
1651 {
1652         struct mpt3_diag_register diag_register;
1653
1654         memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
1655
1656         if (bits_to_register & 1) {
1657                 ioc_info(ioc, "registering trace buffer support\n");
1658                 ioc->diag_trigger_master.MasterData =
1659                     (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
1660                 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
1661                 /* register for 2MB buffers  */
1662                 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1663                 diag_register.unique_id = 0x7075900;
1664                 _ctl_diag_register_2(ioc,  &diag_register);
1665         }
1666
1667         if (bits_to_register & 2) {
1668                 ioc_info(ioc, "registering snapshot buffer support\n");
1669                 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
1670                 /* register for 2MB buffers  */
1671                 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1672                 diag_register.unique_id = 0x7075901;
1673                 _ctl_diag_register_2(ioc,  &diag_register);
1674         }
1675
1676         if (bits_to_register & 4) {
1677                 ioc_info(ioc, "registering extended buffer support\n");
1678                 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
1679                 /* register for 2MB buffers  */
1680                 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1681                 diag_register.unique_id = 0x7075901;
1682                 _ctl_diag_register_2(ioc,  &diag_register);
1683         }
1684 }
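
/*
 * Illustrative note (not part of the driver): the bits in bits_to_register
 * mirror the diag_buffer_enable command line option, so a caller wanting
 * trace plus snapshot buffers at load time would (conceptually) do:
 *
 *	mpt3sas_enable_diag_buffer(ioc, 1 | 2);	// bit 0 trace, bit 1 snapshot
 *
 * Adding bit 2 (value 4) also registers the extended buffer.
 */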
1685
1686 /**
1687  * _ctl_diag_register - application register with driver
1688  * @ioc: per adapter object
1689  * @arg: user space buffer containing ioctl content
1690  *
1691  * This allows the driver to set up any buffers that the firmware will
1692  * need to communicate with the driver.
1693  */
1694 static long
1695 _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1696 {
1697         struct mpt3_diag_register karg;
1698         long rc;
1699
1700         if (copy_from_user(&karg, arg, sizeof(karg))) {
1701                 pr_err("failure at %s:%d/%s()!\n",
1702                     __FILE__, __LINE__, __func__);
1703                 return -EFAULT;
1704         }
1705
1706         rc = _ctl_diag_register_2(ioc, &karg);
1707         return rc;
1708 }
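
/*
 * Illustrative sketch (not part of the driver): how an application might
 * register a 1 MB trace buffer through MPT3DIAGREGISTER.  Field names come
 * from struct mpt3_diag_register; the chosen unique_id is an arbitrary
 * application tag and 'fd' is an already-open control device.
 *
 *	struct mpt3_diag_register karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.hdr.ioc_number = 0;
 *	karg.hdr.max_data_size = sizeof(karg);
 *	karg.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
 *	karg.requested_buffer_size = 1024 * 1024;	// must be 4-byte aligned
 *	karg.unique_id = 0x12345678;			// application-chosen tag
 *	if (ioctl(fd, MPT3DIAGREGISTER, &karg) < 0)
 *		perror("MPT3DIAGREGISTER");
 */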
1709
1710 /**
1711  * _ctl_diag_unregister - application unregister with driver
1712  * @ioc: per adapter object
1713  * @arg: user space buffer containing ioctl content
1714  *
1715  * This allows the driver to clean up any memory allocated for diag
1716  * messages and to free up any resources.
1717  */
1718 static long
1719 _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1720 {
1721         struct mpt3_diag_unregister karg;
1722         void *request_data;
1723         dma_addr_t request_data_dma;
1724         u32 request_data_sz;
1725         u8 buffer_type;
1726
1727         if (copy_from_user(&karg, arg, sizeof(karg))) {
1728                 pr_err("failure at %s:%d/%s()!\n",
1729                     __FILE__, __LINE__, __func__);
1730                 return -EFAULT;
1731         }
1732
1733         dctlprintk(ioc, ioc_info(ioc, "%s\n",
1734                                  __func__));
1735
1736         buffer_type = karg.unique_id & 0x000000ff;
1737         if (!_ctl_diag_capability(ioc, buffer_type)) {
1738                 ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
1739                         __func__, buffer_type);
1740                 return -EPERM;
1741         }
1742
1743         if ((ioc->diag_buffer_status[buffer_type] &
1744             MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1745                 ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
1746                         __func__, buffer_type);
1747                 return -EINVAL;
1748         }
1749         if ((ioc->diag_buffer_status[buffer_type] &
1750             MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
1751                 ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
1752                         __func__, buffer_type);
1753                 return -EINVAL;
1754         }
1755
1756         if (karg.unique_id != ioc->unique_id[buffer_type]) {
1757                 ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
1758                         __func__, karg.unique_id);
1759                 return -EINVAL;
1760         }
1761
1762         request_data = ioc->diag_buffer[buffer_type];
1763         if (!request_data) {
1764                 ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
1765                         __func__, buffer_type);
1766                 return -ENOMEM;
1767         }
1768
1769         request_data_sz = ioc->diag_buffer_sz[buffer_type];
1770         request_data_dma = ioc->diag_buffer_dma[buffer_type];
1771         dma_free_coherent(&ioc->pdev->dev, request_data_sz,
1772                         request_data, request_data_dma);
1773         ioc->diag_buffer[buffer_type] = NULL;
1774         ioc->diag_buffer_status[buffer_type] = 0;
1775         return 0;
1776 }
1777
1778 /**
1779  * _ctl_diag_query - query relevant info associated with diag buffers
1780  * @ioc: per adapter object
1781  * @arg: user space buffer containing ioctl content
1782  *
1783  * The application sends only buffer_type and unique_id.  The driver
1784  * inspects unique_id first; if it is valid, it fills in all the info.  If
1785  * unique_id is 0x00, the driver returns the info for the specified buffer_type.
1786  */
1787 static long
1788 _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1789 {
1790         struct mpt3_diag_query karg;
1791         void *request_data;
1792         int i;
1793         u8 buffer_type;
1794
1795         if (copy_from_user(&karg, arg, sizeof(karg))) {
1796                 pr_err("failure at %s:%d/%s()!\n",
1797                     __FILE__, __LINE__, __func__);
1798                 return -EFAULT;
1799         }
1800
1801         dctlprintk(ioc, ioc_info(ioc, "%s\n",
1802                                  __func__));
1803
1804         karg.application_flags = 0;
1805         buffer_type = karg.buffer_type;
1806
1807         if (!_ctl_diag_capability(ioc, buffer_type)) {
1808                 ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
1809                         __func__, buffer_type);
1810                 return -EPERM;
1811         }
1812
1813         if ((ioc->diag_buffer_status[buffer_type] &
1814             MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1815                 ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
1816                         __func__, buffer_type);
1817                 return -EINVAL;
1818         }
1819
1820         if (karg.unique_id & 0xffffff00) {
1821                 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1822                         ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
1823                                 __func__, karg.unique_id);
1824                         return -EINVAL;
1825                 }
1826         }
1827
1828         request_data = ioc->diag_buffer[buffer_type];
1829         if (!request_data) {
1830                 ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
1831                         __func__, buffer_type);
1832                 return -ENOMEM;
1833         }
1834
1835         if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
1836                 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
1837                     MPT3_APP_FLAGS_BUFFER_VALID);
1838         else
1839                 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
1840                     MPT3_APP_FLAGS_BUFFER_VALID |
1841                     MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
1842
1843         for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1844                 karg.product_specific[i] =
1845                     ioc->product_specific[buffer_type][i];
1846
1847         karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
1848         karg.driver_added_buffer_size = 0;
1849         karg.unique_id = ioc->unique_id[buffer_type];
1850         karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
1851
1852         if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
1853                 ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
1854                         __func__, arg);
1855                 return -EFAULT;
1856         }
1857         return 0;
1858 }
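
/*
 * Illustrative sketch (not part of the driver): querying the registered
 * trace buffer.  As described above, unique_id may be left at 0 so that
 * the driver selects the buffer by buffer_type and fills in the rest.
 * 'fd' is an already-open control device.
 *
 *	struct mpt3_diag_query karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.hdr.ioc_number = 0;
 *	karg.hdr.max_data_size = sizeof(karg);
 *	karg.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
 *	karg.unique_id = 0;			// let the driver match by type
 *	if (!ioctl(fd, MPT3DIAGQUERY, &karg))
 *		printf("size %u, flags 0x%x, unique_id 0x%x\n",
 *		       karg.total_buffer_size, karg.application_flags,
 *		       karg.unique_id);
 */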
1859
1860 /**
1861  * mpt3sas_send_diag_release - Diag Release Message
1862  * @ioc: per adapter object
1863  * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1864  * @issue_reset: specifies whether host reset is required.
1865  *
1866  */
1867 int
1868 mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
1869         u8 *issue_reset)
1870 {
1871         Mpi2DiagReleaseRequest_t *mpi_request;
1872         Mpi2DiagReleaseReply_t *mpi_reply;
1873         u16 smid;
1874         u16 ioc_status;
1875         u32 ioc_state;
1876         int rc;
1877
1878         dctlprintk(ioc, ioc_info(ioc, "%s\n",
1879                                  __func__));
1880
1881         rc = 0;
1882         *issue_reset = 0;
1883
1884         ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1885         if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1886                 if (ioc->diag_buffer_status[buffer_type] &
1887                     MPT3_DIAG_BUFFER_IS_REGISTERED)
1888                         ioc->diag_buffer_status[buffer_type] |=
1889                             MPT3_DIAG_BUFFER_IS_RELEASED;
1890                 dctlprintk(ioc,
1891                            ioc_info(ioc, "%s: skipping due to FAULT state\n",
1892                                     __func__));
1893                 rc = -EAGAIN;
1894                 goto out;
1895         }
1896
1897         if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1898                 ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
1899                 rc = -EAGAIN;
1900                 goto out;
1901         }
1902
1903         smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1904         if (!smid) {
1905                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
1906                 rc = -EAGAIN;
1907                 goto out;
1908         }
1909
1910         ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1911         memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1912         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1913         ioc->ctl_cmds.smid = smid;
1914
1915         mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
1916         mpi_request->BufferType = buffer_type;
1917         mpi_request->VF_ID = 0; /* TODO */
1918         mpi_request->VP_ID = 0;
1919
1920         init_completion(&ioc->ctl_cmds.done);
1921         mpt3sas_base_put_smid_default(ioc, smid);
1922         wait_for_completion_timeout(&ioc->ctl_cmds.done,
1923             MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1924
1925         if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1926                 *issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
1927                                 ioc->ctl_cmds.status, mpi_request,
1928                                 sizeof(Mpi2DiagReleaseRequest_t)/4);
1929                 rc = -EFAULT;
1930                 goto out;
1931         }
1932
1933         /* process the completed Reply Message Frame */
1934         if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1935                 ioc_err(ioc, "%s: no reply message\n", __func__);
1936                 rc = -EFAULT;
1937                 goto out;
1938         }
1939
1940         mpi_reply = ioc->ctl_cmds.reply;
1941         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1942
1943         if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1944                 ioc->diag_buffer_status[buffer_type] |=
1945                     MPT3_DIAG_BUFFER_IS_RELEASED;
1946                 dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
1947         } else {
1948                 ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
1949                          __func__,
1950                          ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1951                 rc = -EFAULT;
1952         }
1953
1954  out:
1955         ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1956         return rc;
1957 }
1958
1959 /**
1960  * _ctl_diag_release - request to send Diag Release Message to firmware
1961  * @ioc: per adapter object
1962  * @arg: user space buffer containing ioctl content
1963  *
1964  * This allows ownership of the specified buffer to be returned to the driver,
1965  * allowing an application to read the buffer without fear that firmware is
1966  * overwriting information in the buffer.
1967  */
1968 static long
1969 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1970 {
1971         struct mpt3_diag_release karg;
1972         void *request_data;
1973         int rc;
1974         u8 buffer_type;
1975         u8 issue_reset = 0;
1976
1977         if (copy_from_user(&karg, arg, sizeof(karg))) {
1978                 pr_err("failure at %s:%d/%s()!\n",
1979                     __FILE__, __LINE__, __func__);
1980                 return -EFAULT;
1981         }
1982
1983         dctlprintk(ioc, ioc_info(ioc, "%s\n",
1984                                  __func__));
1985
1986         buffer_type = karg.unique_id & 0x000000ff;
1987         if (!_ctl_diag_capability(ioc, buffer_type)) {
1988                 ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
1989                         __func__, buffer_type);
1990                 return -EPERM;
1991         }
1992
1993         if ((ioc->diag_buffer_status[buffer_type] &
1994             MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1995                 ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
1996                         __func__, buffer_type);
1997                 return -EINVAL;
1998         }
1999
2000         if (karg.unique_id != ioc->unique_id[buffer_type]) {
2001                 ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
2002                         __func__, karg.unique_id);
2003                 return -EINVAL;
2004         }
2005
2006         if (ioc->diag_buffer_status[buffer_type] &
2007             MPT3_DIAG_BUFFER_IS_RELEASED) {
2008                 ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
2009                         __func__, buffer_type);
2010                 return 0;
2011         }
2012
2013         request_data = ioc->diag_buffer[buffer_type];
2014
2015         if (!request_data) {
2016                 ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
2017                         __func__, buffer_type);
2018                 return -ENOMEM;
2019         }
2020
2021         /* buffers were released due to host reset */
2022         if ((ioc->diag_buffer_status[buffer_type] &
2023             MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
2024                 ioc->diag_buffer_status[buffer_type] |=
2025                     MPT3_DIAG_BUFFER_IS_RELEASED;
2026                 ioc->diag_buffer_status[buffer_type] &=
2027                     ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
2028                 ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
2029                         __func__, buffer_type);
2030                 return 0;
2031         }
2032
2033         rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
2034
2035         if (issue_reset)
2036                 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2037
2038         return rc;
2039 }
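
/*
 * Illustrative sketch (not part of the driver): releasing the trace buffer
 * so firmware stops updating it and its contents can be read back.  The
 * unique_id must match the one used at registration time; as the handlers
 * above show, its low byte is interpreted as the buffer type.
 *
 *	struct mpt3_diag_release karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.hdr.ioc_number = 0;
 *	karg.hdr.max_data_size = sizeof(karg);
 *	karg.unique_id = registered_unique_id;	// value from MPT3DIAGREGISTER
 *	if (ioctl(fd, MPT3DIAGRELEASE, &karg) < 0)
 *		perror("MPT3DIAGRELEASE");
 */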
2040
2041 /**
2042  * _ctl_diag_read_buffer - request for copy of the diag buffer
2043  * @ioc: per adapter object
2044  * @arg: user space buffer containing ioctl content
2045  */
2046 static long
2047 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2048 {
2049         struct mpt3_diag_read_buffer karg;
2050         struct mpt3_diag_read_buffer __user *uarg = arg;
2051         void *request_data, *diag_data;
2052         Mpi2DiagBufferPostRequest_t *mpi_request;
2053         Mpi2DiagBufferPostReply_t *mpi_reply;
2054         int rc, i;
2055         u8 buffer_type;
2056         unsigned long request_size, copy_size;
2057         u16 smid;
2058         u16 ioc_status;
2059         u8 issue_reset = 0;
2060
2061         if (copy_from_user(&karg, arg, sizeof(karg))) {
2062                 pr_err("failure at %s:%d/%s()!\n",
2063                     __FILE__, __LINE__, __func__);
2064                 return -EFAULT;
2065         }
2066
2067         dctlprintk(ioc, ioc_info(ioc, "%s\n",
2068                                  __func__));
2069
2070         buffer_type = karg.unique_id & 0x000000ff;
2071         if (!_ctl_diag_capability(ioc, buffer_type)) {
2072                 ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
2073                         __func__, buffer_type);
2074                 return -EPERM;
2075         }
2076
2077         if (karg.unique_id != ioc->unique_id[buffer_type]) {
2078                 ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
2079                         __func__, karg.unique_id);
2080                 return -EINVAL;
2081         }
2082
2083         request_data = ioc->diag_buffer[buffer_type];
2084         if (!request_data) {
2085                 ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
2086                         __func__, buffer_type);
2087                 return -ENOMEM;
2088         }
2089
2090         request_size = ioc->diag_buffer_sz[buffer_type];
2091
2092         if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
2093                 ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
2094                         __func__);
2095                 return -EINVAL;
2096         }
2097
2098         if (karg.starting_offset > request_size)
2099                 return -EINVAL;
2100
2101         diag_data = (void *)(request_data + karg.starting_offset);
2102         dctlprintk(ioc,
2103                    ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
2104                             __func__, diag_data, karg.starting_offset,
2105                             karg.bytes_to_read));
2106
2107         /* Truncate data on requests that are too large */
2108         if ((diag_data + karg.bytes_to_read < diag_data) ||
2109             (diag_data + karg.bytes_to_read > request_data + request_size))
2110                 copy_size = request_size - karg.starting_offset;
2111         else
2112                 copy_size = karg.bytes_to_read;
2113
2114         if (copy_to_user((void __user *)uarg->diagnostic_data,
2115             diag_data, copy_size)) {
2116                 ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
2117                         __func__, diag_data);
2118                 return -EFAULT;
2119         }
2120
2121         if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
2122                 return 0;
2123
2124         dctlprintk(ioc,
2125                    ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
2126                             __func__, buffer_type));
2127         if ((ioc->diag_buffer_status[buffer_type] &
2128             MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
2129                 dctlprintk(ioc,
2130                            ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
2131                                     __func__, buffer_type));
2132                 return 0;
2133         }
2134         /* Get a free request frame and save the message context. */
2135
2136
2137         if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
2138                 ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
2139                 rc = -EAGAIN;
2140                 goto out;
2141         }
2142
2143         smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
2144         if (!smid) {
2145                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2146                 rc = -EAGAIN;
2147                 goto out;
2148         }
2149
2150         rc = 0;
2151         ioc->ctl_cmds.status = MPT3_CMD_PENDING;
2152         memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2153         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2154         ioc->ctl_cmds.smid = smid;
2155
2156         mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
2157         mpi_request->BufferType = buffer_type;
2158         mpi_request->BufferLength =
2159             cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
2160         mpi_request->BufferAddress =
2161             cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
2162         for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
2163                 mpi_request->ProductSpecific[i] =
2164                         cpu_to_le32(ioc->product_specific[buffer_type][i]);
2165         mpi_request->VF_ID = 0; /* TODO */
2166         mpi_request->VP_ID = 0;
2167
2168         init_completion(&ioc->ctl_cmds.done);
2169         mpt3sas_base_put_smid_default(ioc, smid);
2170         wait_for_completion_timeout(&ioc->ctl_cmds.done,
2171             MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2172
2173         if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
2174                 issue_reset =
2175                         mpt3sas_base_check_cmd_timeout(ioc,
2176                                 ioc->ctl_cmds.status, mpi_request,
2177                                 sizeof(Mpi2DiagBufferPostRequest_t)/4);
2178                 goto issue_host_reset;
2179         }
2180
2181         /* process the completed Reply Message Frame */
2182         if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
2183                 ioc_err(ioc, "%s: no reply message\n", __func__);
2184                 rc = -EFAULT;
2185                 goto out;
2186         }
2187
2188         mpi_reply = ioc->ctl_cmds.reply;
2189         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
2190
2191         if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
2192                 ioc->diag_buffer_status[buffer_type] |=
2193                     MPT3_DIAG_BUFFER_IS_REGISTERED;
2194                 dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
2195         } else {
2196                 ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2197                          __func__, ioc_status,
2198                          le32_to_cpu(mpi_reply->IOCLogInfo));
2199                 rc = -EFAULT;
2200         }
2201
2202  issue_host_reset:
2203         if (issue_reset)
2204                 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2205
2206  out:
2207
2208         ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2209         return rc;
2210 }
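
/*
 * Illustrative sketch (not part of the driver): reading back a released
 * trace buffer.  The ioctl argument carries a trailing diagnostic_data
 * area (as the copy_to_user() above implies), so the application allocates
 * the struct plus the number of bytes it asks for; offsets and lengths
 * must be 4-byte aligned.
 *
 *	uint32_t want = 64 * 1024;		// multiple of 4
 *	struct mpt3_diag_read_buffer *karg;
 *
 *	karg = calloc(1, sizeof(*karg) + want);
 *	karg->hdr.ioc_number = 0;
 *	karg->hdr.max_data_size = sizeof(*karg) + want;
 *	karg->unique_id = registered_unique_id;
 *	karg->starting_offset = 0;
 *	karg->bytes_to_read = want;
 *	if (!ioctl(fd, MPT3DIAGREADBUFFER, karg))
 *		;	// data is now available in karg->diagnostic_data
 */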
2211
2212
2213
2214 #ifdef CONFIG_COMPAT
2215 /**
2216  * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2217  * @ioc: per adapter object
2218  * @cmd: ioctl opcode
2219  * @arg: (struct mpt3_ioctl_command32)
2220  *
2221  * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
2222  */
2223 static long
2224 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
2225         void __user *arg)
2226 {
2227         struct mpt3_ioctl_command32 karg32;
2228         struct mpt3_ioctl_command32 __user *uarg;
2229         struct mpt3_ioctl_command karg;
2230
2231         if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
2232                 return -EINVAL;
2233
2234         uarg = (struct mpt3_ioctl_command32 __user *) arg;
2235
2236         if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2237                 pr_err("failure at %s:%d/%s()!\n",
2238                     __FILE__, __LINE__, __func__);
2239                 return -EFAULT;
2240         }
2241
2242         memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
2243         karg.hdr.ioc_number = karg32.hdr.ioc_number;
2244         karg.hdr.port_number = karg32.hdr.port_number;
2245         karg.hdr.max_data_size = karg32.hdr.max_data_size;
2246         karg.timeout = karg32.timeout;
2247         karg.max_reply_bytes = karg32.max_reply_bytes;
2248         karg.data_in_size = karg32.data_in_size;
2249         karg.data_out_size = karg32.data_out_size;
2250         karg.max_sense_bytes = karg32.max_sense_bytes;
2251         karg.data_sge_offset = karg32.data_sge_offset;
2252         karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
2253         karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
2254         karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
2255         karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
2256         return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2257 }
2258 #endif
2259
2260 /**
2261  * _ctl_ioctl_main - main ioctl entry point
2262  * @file:  (struct file)
2263  * @cmd:  ioctl opcode
2264  * @arg:  user space data buffer
2265  * @compat:  handles 32 bit applications in 64bit os
2266  * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
2267  * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
2268  */
2269 static long
2270 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2271         u8 compat, u16 mpi_version)
2272 {
2273         struct MPT3SAS_ADAPTER *ioc;
2274         struct mpt3_ioctl_header ioctl_header;
2275         enum block_state state;
2276         long ret = -EINVAL;
2277
2278         /* get IOCTL header */
2279         if (copy_from_user(&ioctl_header, (char __user *)arg,
2280             sizeof(struct mpt3_ioctl_header))) {
2281                 pr_err("failure at %s:%d/%s()!\n",
2282                     __FILE__, __LINE__, __func__);
2283                 return -EFAULT;
2284         }
2285
2286         if (_ctl_verify_adapter(ioctl_header.ioc_number,
2287                                 &ioc, mpi_version) == -1 || !ioc)
2288                 return -ENODEV;
2289
2290         /* pci_access_mutex lock acquired by ioctl path */
2291         mutex_lock(&ioc->pci_access_mutex);
2292
2293         if (ioc->shost_recovery || ioc->pci_error_recovery ||
2294             ioc->is_driver_loading || ioc->remove_host) {
2295                 ret = -EAGAIN;
2296                 goto out_unlock_pciaccess;
2297         }
2298
2299         state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2300         if (state == NON_BLOCKING) {
2301                 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
2302                         ret = -EAGAIN;
2303                         goto out_unlock_pciaccess;
2304                 }
2305         } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
2306                 ret = -ERESTARTSYS;
2307                 goto out_unlock_pciaccess;
2308         }
2309
2310
2311         switch (cmd) {
2312         case MPT3IOCINFO:
2313                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
2314                         ret = _ctl_getiocinfo(ioc, arg);
2315                 break;
2316 #ifdef CONFIG_COMPAT
2317         case MPT3COMMAND32:
2318 #endif
2319         case MPT3COMMAND:
2320         {
2321                 struct mpt3_ioctl_command __user *uarg;
2322                 struct mpt3_ioctl_command karg;
2323
2324 #ifdef CONFIG_COMPAT
2325                 if (compat) {
2326                         ret = _ctl_compat_mpt_command(ioc, cmd, arg);
2327                         break;
2328                 }
2329 #endif
2330                 if (copy_from_user(&karg, arg, sizeof(karg))) {
2331                         pr_err("failure at %s:%d/%s()!\n",
2332                             __FILE__, __LINE__, __func__);
2333                         ret = -EFAULT;
2334                         break;
2335                 }
2336
2337                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
2338                         uarg = arg;
2339                         ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2340                 }
2341                 break;
2342         }
2343         case MPT3EVENTQUERY:
2344                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
2345                         ret = _ctl_eventquery(ioc, arg);
2346                 break;
2347         case MPT3EVENTENABLE:
2348                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
2349                         ret = _ctl_eventenable(ioc, arg);
2350                 break;
2351         case MPT3EVENTREPORT:
2352                 ret = _ctl_eventreport(ioc, arg);
2353                 break;
2354         case MPT3HARDRESET:
2355                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
2356                         ret = _ctl_do_reset(ioc, arg);
2357                 break;
2358         case MPT3BTDHMAPPING:
2359                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
2360                         ret = _ctl_btdh_mapping(ioc, arg);
2361                 break;
2362         case MPT3DIAGREGISTER:
2363                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
2364                         ret = _ctl_diag_register(ioc, arg);
2365                 break;
2366         case MPT3DIAGUNREGISTER:
2367                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
2368                         ret = _ctl_diag_unregister(ioc, arg);
2369                 break;
2370         case MPT3DIAGQUERY:
2371                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
2372                         ret = _ctl_diag_query(ioc, arg);
2373                 break;
2374         case MPT3DIAGRELEASE:
2375                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
2376                         ret = _ctl_diag_release(ioc, arg);
2377                 break;
2378         case MPT3DIAGREADBUFFER:
2379                 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
2380                         ret = _ctl_diag_read_buffer(ioc, arg);
2381                 break;
2382         default:
2383                 dctlprintk(ioc,
2384                            ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
2385                                     cmd));
2386                 break;
2387         }
2388
2389         mutex_unlock(&ioc->ctl_cmds.mutex);
2390 out_unlock_pciaccess:
2391         mutex_unlock(&ioc->pci_access_mutex);
2392         return ret;
2393 }
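
/*
 * Illustrative note (not part of the driver): an application can opt into
 * the NON_BLOCKING path above by opening the control node with O_NONBLOCK,
 * in which case a busy ctl_cmds mutex yields -EAGAIN instead of sleeping.
 * The node name is an assumption.
 *
 *	struct mpt3_ioctl_iocinfo iocinfo = { 0 };	// .hdr filled in as usual
 *	int fd = open("/dev/mpt3ctl", O_RDWR | O_NONBLOCK);
 *
 *	if (ioctl(fd, MPT3IOCINFO, &iocinfo) < 0 && errno == EAGAIN)
 *		;	// driver was busy; retry later instead of blocking
 */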
2394
2395 /**
2396  * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
2397  * @file: (struct file)
2398  * @cmd: ioctl opcode
2399  * @arg: user space data buffer
2400  */
2401 static long
2402 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2403 {
2404         long ret;
2405
2406         /* pass MPI25_VERSION | MPI26_VERSION value,
2407          * to indicate that this ioctl cmd
2408          * came from mpt3ctl ioctl device.
2409          */
2410         ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
2411                 MPI25_VERSION | MPI26_VERSION);
2412         return ret;
2413 }
2414
2415 /**
2416  * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
2417  * @file: (struct file)
2418  * @cmd: ioctl opcode
2419  * @arg: user space data buffer
2420  */
2421 static long
2422 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2423 {
2424         long ret;
2425
2426         /* pass MPI2_VERSION value, to indicate that this ioctl cmd
2427          * came from mpt2ctl ioctl device.
2428          */
2429         ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION);
2430         return ret;
2431 }
2432 #ifdef CONFIG_COMPAT
2433 /**
2434  * _ctl_ioctl_compat - main ioctl entry point (compat)
2435  * @file: (struct file)
2436  * @cmd: ioctl opcode
2437  * @arg: user space data buffer
2438  *
2439  * This routine handles 32 bit applications in 64bit os.
2440  */
2441 static long
2442 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2443 {
2444         long ret;
2445
2446         ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
2447                 MPI25_VERSION | MPI26_VERSION);
2448         return ret;
2449 }
2450
2451 /**
2452  * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
2453  * @file: (struct file)
2454  * @cmd: ioctl opcode
2455  * @arg: user space data buffer
2456  *
2457  * This routine handles 32 bit applications in 64bit os.
2458  */
2459 static long
2460 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2461 {
2462         long ret;
2463
2464         ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION);
2465         return ret;
2466 }
2467 #endif
2468
2469 /* scsi host attributes */
2470 /**
2471  * _ctl_version_fw_show - firmware version
2472  * @cdev: pointer to embedded class device
2473  * @attr: ?
2474  * @buf: the buffer returned
2475  *
2476  * A sysfs 'read-only' shost attribute.
2477  */
2478 static ssize_t
2479 _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
2480         char *buf)
2481 {
2482         struct Scsi_Host *shost = class_to_shost(cdev);
2483         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2484
2485         return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2486             (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2487             (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2488             (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2489             ioc->facts.FWVersion.Word & 0x000000FF);
2490 }
2491 static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
2492
2493 /**
2494  * _ctl_version_bios_show - bios version
2495  * @cdev: pointer to embedded class device
2496  * @attr: ?
2497  * @buf: the buffer returned
2498  *
2499  * A sysfs 'read-only' shost attribute.
2500  */
2501 static ssize_t
2502 _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
2503         char *buf)
2504 {
2505         struct Scsi_Host *shost = class_to_shost(cdev);
2506         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2507
2508         u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2509
2510         return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2511             (version & 0xFF000000) >> 24,
2512             (version & 0x00FF0000) >> 16,
2513             (version & 0x0000FF00) >> 8,
2514             version & 0x000000FF);
2515 }
2516 static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
2517
2518 /**
2519  * _ctl_version_mpi_show - MPI (message passing interface) version
2520  * @cdev: pointer to embedded class device
2521  * @attr: ?
2522  * @buf: the buffer returned
2523  *
2524  * A sysfs 'read-only' shost attribute.
2525  */
2526 static ssize_t
2527 _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
2528         char *buf)
2529 {
2530         struct Scsi_Host *shost = class_to_shost(cdev);
2531         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2532
2533         return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
2534             ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
2535 }
2536 static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
2537
2538 /**
2539  * _ctl_version_product_show - product name
2540  * @cdev: pointer to embedded class device
2541  * @attr: ?
2542  * @buf: the buffer returned
2543  *
2544  * A sysfs 'read-only' shost attribute.
2545  */
2546 static ssize_t
2547 _ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
2548         char *buf)
2549 {
2550         struct Scsi_Host *shost = class_to_shost(cdev);
2551         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2552
2553         return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
2554 }
2555 static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
2556
2557 /**
2558  * _ctl_version_nvdata_persistent_show - nvdata persistent version
2559  * @cdev: pointer to embedded class device
2560  * @attr: ?
2561  * @buf: the buffer returned
2562  *
2563  * A sysfs 'read-only' shost attribute.
2564  */
2565 static ssize_t
2566 _ctl_version_nvdata_persistent_show(struct device *cdev,
2567         struct device_attribute *attr, char *buf)
2568 {
2569         struct Scsi_Host *shost = class_to_shost(cdev);
2570         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2571
2572         return snprintf(buf, PAGE_SIZE, "%08xh\n",
2573             le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
2574 }
2575 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
2576         _ctl_version_nvdata_persistent_show, NULL);
2577
2578 /**
2579  * _ctl_version_nvdata_default_show - nvdata default version
2580  * @cdev: pointer to embedded class device
2581  * @attr: ?
2582  * @buf: the buffer returned
2583  *
2584  * A sysfs 'read-only' shost attribute.
2585  */
2586 static ssize_t
2587 _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
2588         *attr, char *buf)
2589 {
2590         struct Scsi_Host *shost = class_to_shost(cdev);
2591         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2592
2593         return snprintf(buf, PAGE_SIZE, "%08xh\n",
2594             le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
2595 }
2596 static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
2597         _ctl_version_nvdata_default_show, NULL);
2598
2599 /**
2600  * _ctl_board_name_show - board name
2601  * @cdev: pointer to embedded class device
2602  * @attr: ?
2603  * @buf: the buffer returned
2604  *
2605  * A sysfs 'read-only' shost attribute.
2606  */
2607 static ssize_t
2608 _ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
2609         char *buf)
2610 {
2611         struct Scsi_Host *shost = class_to_shost(cdev);
2612         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2613
2614         return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
2615 }
2616 static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
2617
2618 /**
2619  * _ctl_board_assembly_show - board assembly name
2620  * @cdev: pointer to embedded class device
2621  * @attr: ?
2622  * @buf: the buffer returned
2623  *
2624  * A sysfs 'read-only' shost attribute.
2625  */
2626 static ssize_t
2627 _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
2628         char *buf)
2629 {
2630         struct Scsi_Host *shost = class_to_shost(cdev);
2631         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2632
2633         return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
2634 }
2635 static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
2636
2637 /**
2638  * _ctl_board_tracer_show - board tracer number
2639  * @cdev: pointer to embedded class device
2640  * @attr: ?
2641  * @buf: the buffer returned
2642  *
2643  * A sysfs 'read-only' shost attribute.
2644  */
2645 static ssize_t
2646 _ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
2647         char *buf)
2648 {
2649         struct Scsi_Host *shost = class_to_shost(cdev);
2650         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2651
2652         return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
2653 }
2654 static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
2655
2656 /**
2657  * _ctl_io_delay_show - io missing delay
2658  * @cdev: pointer to embedded class device
2659  * @attr: ?
2660  * @buf: the buffer returned
2661  *
2662  * This is for the firmware implementation of debouncing device
2663  * removal events.
2664  *
2665  * A sysfs 'read-only' shost attribute.
2666  */
2667 static ssize_t
2668 _ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
2669         char *buf)
2670 {
2671         struct Scsi_Host *shost = class_to_shost(cdev);
2672         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2673
2674         return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
2675 }
2676 static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
2677
2678 /**
2679  * _ctl_device_delay_show - device missing delay
2680  * @cdev: pointer to embedded class device
2681  * @attr: ?
2682  * @buf: the buffer returned
2683  *
2684  * This is for the firmware implementation of debouncing device
2685  * removal events.
2686  *
2687  * A sysfs 'read-only' shost attribute.
2688  */
2689 static ssize_t
2690 _ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
2691         char *buf)
2692 {
2693         struct Scsi_Host *shost = class_to_shost(cdev);
2694         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2695
2696         return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
2697 }
2698 static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
2699
2700 /**
2701  * _ctl_fw_queue_depth_show - global credits
2702  * @cdev: pointer to embedded class device
2703  * @attr: ?
2704  * @buf: the buffer returned
2705  *
2706  * This is the firmware queue depth limit
2707  *
2708  * A sysfs 'read-only' shost attribute.
2709  */
2710 static ssize_t
2711 _ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
2712         char *buf)
2713 {
2714         struct Scsi_Host *shost = class_to_shost(cdev);
2715         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2716
2717         return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
2718 }
2719 static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
2720
2721 /**
2722  * _ctl_host_sas_address_show - sas address
2723  * @cdev: pointer to embedded class device
2724  * @attr: ?
2725  * @buf: the buffer returned
2726  *
2727  * This is the controller sas address
2728  *
2729  * A sysfs 'read-only' shost attribute.
2730  */
2731 static ssize_t
2732 _ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
2733         char *buf)
2734
2735 {
2736         struct Scsi_Host *shost = class_to_shost(cdev);
2737         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2738
2739         return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
2740             (unsigned long long)ioc->sas_hba.sas_address);
2741 }
2742 static DEVICE_ATTR(host_sas_address, S_IRUGO,
2743         _ctl_host_sas_address_show, NULL);
2744
2745 /**
2746  * _ctl_logging_level_show - logging level
2747  * @cdev: pointer to embedded class device
2748  * @attr: ?
2749  * @buf: the buffer returned
2750  *
2751  * A sysfs 'read/write' shost attribute.
2752  */
2753 static ssize_t
2754 _ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
2755         char *buf)
2756 {
2757         struct Scsi_Host *shost = class_to_shost(cdev);
2758         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2759
2760         return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
2761 }
2762 static ssize_t
2763 _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
2764         const char *buf, size_t count)
2765 {
2766         struct Scsi_Host *shost = class_to_shost(cdev);
2767         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2768         int val = 0;
2769
2770         if (sscanf(buf, "%x", &val) != 1)
2771                 return -EINVAL;
2772
2773         ioc->logging_level = val;
2774         ioc_info(ioc, "logging_level=%08xh\n",
2775                  ioc->logging_level);
2776         return strlen(buf);
2777 }
2778 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
2779         _ctl_logging_level_store);
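/*
 * Usage sketch for the logging_level attribute (illustrative only, not
 * part of the driver).  It assumes the standard shost sysfs path
 * /sys/class/scsi_host/hostN/ with "host0" as a placeholder, and needs
 * write permission on the attribute (root):
 *
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              FILE *f = fopen("/sys/class/scsi_host/host0/logging_level",
 *                              "w");
 *
 *              if (!f)
 *                      return 1;
 *              fprintf(f, "%x\n", 0x3f8);  // placeholder level, parsed as hex
 *              return fclose(f) ? 1 : 0;
 *      }
 *
 * The store handler uses sscanf("%x"), so the value is always taken as
 * hexadecimal; reading the attribute back reports the level as "%08xh".
 */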
2780
2781 /**
2782  * _ctl_fwfault_debug_show - show the fwfault_debug attribute
2783  * @cdev: pointer to embedded class device
2784  * @attr: ?
2785  * @buf: the buffer returned
2786  *
2787  * mpt3sas_fwfault_debug is a command line option
2788  * A sysfs 'read/write' shost attribute.
2789  */
2790 static ssize_t
2791 _ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
2792         char *buf)
2793 {
2794         struct Scsi_Host *shost = class_to_shost(cdev);
2795         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2796
2797         return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
2798 }
2799 static ssize_t
2800 _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
2801         const char *buf, size_t count)
2802 {
2803         struct Scsi_Host *shost = class_to_shost(cdev);
2804         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2805         int val = 0;
2806
2807         if (sscanf(buf, "%d", &val) != 1)
2808                 return -EINVAL;
2809
2810         ioc->fwfault_debug = val;
2811         ioc_info(ioc, "fwfault_debug=%d\n",
2812                  ioc->fwfault_debug);
2813         return strlen(buf);
2814 }
2815 static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
2816         _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
2817
2818 /**
2819  * _ctl_ioc_reset_count_show - ioc reset count
2820  * @cdev: pointer to embedded class device
2821  * @attr: ?
2822  * @buf: the buffer returned
2823  *
2824  * This is the number of times the IOC has been reset
2825  *
2826  * A sysfs 'read-only' shost attribute.
2827  */
2828 static ssize_t
2829 _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
2830         char *buf)
2831 {
2832         struct Scsi_Host *shost = class_to_shost(cdev);
2833         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2834
2835         return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
2836 }
2837 static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
2838
2839 /**
2840  * _ctl_ioc_reply_queue_count_show - number of reply queues
2841  * @cdev: pointer to embedded class device
2842  * @attr: ?
2843  * @buf: the buffer returned
2844  *
2845  * This is the number of reply queues
2846  *
2847  * A sysfs 'read-only' shost attribute.
2848  */
2849 static ssize_t
2850 _ctl_ioc_reply_queue_count_show(struct device *cdev,
2851         struct device_attribute *attr, char *buf)
2852 {
2853         u8 reply_queue_count;
2854         struct Scsi_Host *shost = class_to_shost(cdev);
2855         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2856
2857         if ((ioc->facts.IOCCapabilities &
2858             MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
2859                 reply_queue_count = ioc->reply_queue_count;
2860         else
2861                 reply_queue_count = 1;
2862
2863         return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
2864 }
2865 static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
2866         NULL);
2867
2868 /**
2869  * _ctl_BRM_status_show - Backup Rail Monitor Status
2870  * @cdev: pointer to embedded class device
2871  * @attr: ?
2872  * @buf: the buffer returned
2873  *
2874  * This is the Backup Rail Monitor (BRM) status (warpdrive only)
2875  *
2876  * A sysfs 'read-only' shost attribute.
2877  */
2878 static ssize_t
2879 _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
2880         char *buf)
2881 {
2882         struct Scsi_Host *shost = class_to_shost(cdev);
2883         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2884         Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
2885         Mpi2ConfigReply_t mpi_reply;
2886         u16 backup_rail_monitor_status = 0;
2887         u16 ioc_status;
2888         int sz;
2889         ssize_t rc = 0;
2890
2891         if (!ioc->is_warpdrive) {
2892                 ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
2893                         __func__);
2894                 goto out;
2895         }
2896         /* pci_access_mutex serializes this show path against PCI removal */
2897         mutex_lock(&ioc->pci_access_mutex);
2898         if (ioc->pci_error_recovery || ioc->remove_host) {
2899                 mutex_unlock(&ioc->pci_access_mutex);
2900                 return 0;
2901         }
2902
2903         /* allocate up to 36 GPIOVal entries */
2904         sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
2905         io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
2906         if (!io_unit_pg3) {
2907                 ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
2908                         __func__, sz);
2909                 goto out;
2910         }
2911
2912         if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
2913             0) {
2914                 ioc_err(ioc, "%s: failed reading iounit_pg3\n",
2915                         __func__);
2916                 goto out;
2917         }
2918
2919         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
2920         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2921                 ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
2922                         __func__, ioc_status);
2923                 goto out;
2924         }
2925
2926         if (io_unit_pg3->GPIOCount < 25) {
2927                 ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
2928                         __func__, io_unit_pg3->GPIOCount);
2929                 goto out;
2930         }
2931
2932         /* BRM status is in bit zero of GPIOVal[24] */
2933         backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
2934         rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
2935
2936  out:
2937         kfree(io_unit_pg3);
2938         mutex_unlock(&ioc->pci_access_mutex);
2939         return rc;
2940 }
2941 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
2942
2943 struct DIAG_BUFFER_START {
2944         __le32  Size;
2945         __le32  DiagVersion;
2946         u8      BufferType;
2947         u8      Reserved[3];
2948         __le32  Reserved1;
2949         __le32  Reserved2;
2950         __le32  Reserved3;
2951 };
2952
2953 /**
2954  * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
2955  * @cdev: pointer to embedded class device
2956  * @attr: ?
2957  * @buf: the buffer returned
2958  *
2959  * A sysfs 'read-only' shost attribute.
2960  */
2961 static ssize_t
2962 _ctl_host_trace_buffer_size_show(struct device *cdev,
2963         struct device_attribute *attr, char *buf)
2964 {
2965         struct Scsi_Host *shost = class_to_shost(cdev);
2966         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2967         u32 size = 0;
2968         struct DIAG_BUFFER_START *request_data;
2969
2970         if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
2971                 ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
2972                         __func__);
2973                 return 0;
2974         }
2975
2976         if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2977             MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2978                 ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
2979                         __func__);
2980                 return 0;
2981         }
2982
2983         request_data = (struct DIAG_BUFFER_START *)
2984             ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
2985         if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
2986             le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
2987             le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
2988             le32_to_cpu(request_data->Reserved3) == 0x4742444c)
2989                 size = le32_to_cpu(request_data->Size);
2990
2991         ioc->ring_buffer_sz = size;
2992         return snprintf(buf, PAGE_SIZE, "%d\n", size);
2993 }
2994 static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
2995         _ctl_host_trace_buffer_size_show, NULL);
2996
2997 /**
2998  * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
2999  * @cdev: pointer to embedded class device
3000  * @attr: ?
3001  * @buf: the buffer returned
3002  *
3003  * A sysfs 'read/write' shost attribute.
3004  *
3005  * Only up to 4k bytes (one page) of the ring buffer can be read per
3006  * access.  To read beyond that, write the desired byte offset to this
3007  * same attribute; the write moves the read pointer.
3008  */
3009 static ssize_t
3010 _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
3011         char *buf)
3012 {
3013         struct Scsi_Host *shost = class_to_shost(cdev);
3014         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3015         void *request_data;
3016         u32 size;
3017
3018         if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3019                 ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3020                         __func__);
3021                 return 0;
3022         }
3023
3024         if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3025             MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3026                 ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3027                         __func__);
3028                 return 0;
3029         }
3030
3031         if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
3032                 return 0;
3033
3034         size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
3035         size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3036         request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
3037         memcpy(buf, request_data, size);
3038         return size;
3039 }
3040
3041 static ssize_t
3042 _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
3043         const char *buf, size_t count)
3044 {
3045         struct Scsi_Host *shost = class_to_shost(cdev);
3046         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3047         int val = 0;
3048
3049         if (sscanf(buf, "%d", &val) != 1)
3050                 return -EINVAL;
3051
3052         ioc->ring_buffer_offset = val;
3053         return strlen(buf);
3054 }
3055 static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
3056         _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
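/*
 * Usage sketch for paging through the trace buffer (illustrative only,
 * not part of the driver).  It assumes the standard shost sysfs path
 * with "host0" as a placeholder and a hypothetical output file name;
 * run as root since the offset store is S_IWUSR.  Reading
 * host_trace_buffer_size first gives the total number of bytes.
 *
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              const char *attr =
 *                  "/sys/class/scsi_host/host0/host_trace_buffer";
 *              char page[4096];
 *              long offset = 0;
 *              size_t n = 0;
 *              FILE *out = fopen("trace_buffer.bin", "w");
 *
 *              if (!out)
 *                      return 1;
 *              do {
 *                      FILE *w = fopen(attr, "w");
 *                      FILE *r;
 *
 *                      if (!w)
 *                              break;
 *                      fprintf(w, "%ld\n", offset);  // move the read pointer
 *                      fclose(w);
 *                      r = fopen(attr, "r");
 *                      if (!r)
 *                              break;
 *                      n = fread(page, 1, sizeof(page), r);  // one page max
 *                      fclose(r);
 *                      fwrite(page, 1, n, out);
 *                      offset += n;
 *              } while (n > 0);
 *              fclose(out);
 *              return 0;
 *      }
 */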
3057
3058
3059 /*****************************************/
3060
3061 /**
3062  * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
3063  * @cdev: pointer to embedded class device
3064  * @attr: ?
3065  * @buf: the buffer returned
3066  *
3067  * A sysfs 'read/write' shost attribute.
3068  *
3069  * This is a mechanism to post/release host_trace_buffers
3070  */
3071 static ssize_t
3072 _ctl_host_trace_buffer_enable_show(struct device *cdev,
3073         struct device_attribute *attr, char *buf)
3074 {
3075         struct Scsi_Host *shost = class_to_shost(cdev);
3076         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3077
3078         if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
3079            ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3080             MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
3081                 return snprintf(buf, PAGE_SIZE, "off\n");
3082         else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3083             MPT3_DIAG_BUFFER_IS_RELEASED))
3084                 return snprintf(buf, PAGE_SIZE, "release\n");
3085         else
3086                 return snprintf(buf, PAGE_SIZE, "post\n");
3087 }
3088
3089 static ssize_t
3090 _ctl_host_trace_buffer_enable_store(struct device *cdev,
3091         struct device_attribute *attr, const char *buf, size_t count)
3092 {
3093         struct Scsi_Host *shost = class_to_shost(cdev);
3094         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3095         char str[10] = "";
3096         struct mpt3_diag_register diag_register;
3097         u8 issue_reset = 0;
3098
3099         /* don't allow post/release to occur while recovery is active */
3100         if (ioc->shost_recovery || ioc->remove_host ||
3101             ioc->pci_error_recovery || ioc->is_driver_loading)
3102                 return -EBUSY;
3103
3104         if (sscanf(buf, "%9s", str) != 1)
3105                 return -EINVAL;
3106
3107         if (!strcmp(str, "post")) {
3108                 /* exit out if host buffers are already posted */
3109                 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
3110                     (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3111                     MPT3_DIAG_BUFFER_IS_REGISTERED) &&
3112                     ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3113                     MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
3114                         goto out;
3115                 memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
3116                 ioc_info(ioc, "posting host trace buffers\n");
3117                 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
3118                 diag_register.requested_buffer_size = (1024 * 1024);
3119                 diag_register.unique_id = 0x7075900;
3120                 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
3121                 _ctl_diag_register_2(ioc,  &diag_register);
3122         } else if (!strcmp(str, "release")) {
3123                 /* exit out if host buffers are already released */
3124                 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
3125                         goto out;
3126                 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3127                     MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
3128                         goto out;
3129                 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3130                     MPT3_DIAG_BUFFER_IS_RELEASED))
3131                         goto out;
3132                 ioc_info(ioc, "releasing host trace buffer\n");
3133                 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
3134                     &issue_reset);
3135         }
3136
3137  out:
3138         return strlen(buf);
3139 }
3140 static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
3141         _ctl_host_trace_buffer_enable_show,
3142         _ctl_host_trace_buffer_enable_store);
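/*
 * Usage sketch for posting/releasing the trace buffer (illustrative
 * only, not part of the driver; assumes the standard shost sysfs path
 * with "host0" as a placeholder):
 *
 *      #include <stdio.h>
 *
 *      static int trace_buffer_ctl(const char *cmd)  // "post" or "release"
 *      {
 *              FILE *f = fopen(
 *                  "/sys/class/scsi_host/host0/host_trace_buffer_enable",
 *                  "w");
 *
 *              if (!f)
 *                      return -1;
 *              fprintf(f, "%s\n", cmd);
 *              return fclose(f);
 *      }
 *
 * Reading the attribute back reports "off", "post" or "release"
 * depending on the current state of the trace buffer.
 */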
3143
3144 /*********** diagnostic trigger support **********************************/
3145
3146 /**
3147  * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
3148  * @cdev: pointer to embedded class device
3149  * @attr: ?
3150  * @buf: the buffer returned
3151  *
3152  * A sysfs 'read/write' shost attribute.
3153  */
3154 static ssize_t
3155 _ctl_diag_trigger_master_show(struct device *cdev,
3156         struct device_attribute *attr, char *buf)
3157
3158 {
3159         struct Scsi_Host *shost = class_to_shost(cdev);
3160         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3161         unsigned long flags;
3162         ssize_t rc;
3163
3164         spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3165         rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
3166         memcpy(buf, &ioc->diag_trigger_master, rc);
3167         spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3168         return rc;
3169 }
3170
3171 /**
3172  * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
3173  * @cdev: pointer to embedded class device
3174  * @attr: ?
3175  * @buf: the buffer returned
3176  * @count: ?
3177  *
3178  * A sysfs 'read/write' shost attribute.
3179  */
3180 static ssize_t
3181 _ctl_diag_trigger_master_store(struct device *cdev,
3182         struct device_attribute *attr, const char *buf, size_t count)
3183
3184 {
3185         struct Scsi_Host *shost = class_to_shost(cdev);
3186         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3187         unsigned long flags;
3188         ssize_t rc;
3189
3190         spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3191         rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
3192         memset(&ioc->diag_trigger_master, 0,
3193             sizeof(struct SL_WH_MASTER_TRIGGER_T));
3194         memcpy(&ioc->diag_trigger_master, buf, rc);
3195         ioc->diag_trigger_master.MasterData |=
3196             (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
3197         spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3198         return rc;
3199 }
3200 static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
3201         _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
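/*
 * Unlike most shost attributes, the diag_trigger_* attributes exchange
 * raw binary trigger structures rather than text.  Illustrative sketch
 * of dumping the current master trigger blob (not part of the driver;
 * "host0" is a placeholder and the 64-byte buffer is simply assumed to
 * be large enough for struct SL_WH_MASTER_TRIGGER_T):
 *
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              unsigned char blob[64];
 *              size_t i, n;
 *              FILE *f = fopen(
 *                  "/sys/class/scsi_host/host0/diag_trigger_master", "r");
 *
 *              if (!f)
 *                      return 1;
 *              n = fread(blob, 1, sizeof(blob), f);
 *              fclose(f);
 *              for (i = 0; i < n; i++)
 *                      printf("%02x ", blob[i]);
 *              printf("\n");
 *              return 0;
 *      }
 *
 * Note that the store path always ORs MASTER_TRIGGER_FW_FAULT and
 * MASTER_TRIGGER_ADAPTER_RESET into MasterData, so those two triggers
 * cannot be cleared from user space.
 */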
3202
3203
3204 /**
3205  * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
3206  * @cdev: pointer to embedded class device
3207  * @attr: ?
3208  * @buf: the buffer returned
3209  *
3210  * A sysfs 'read/write' shost attribute.
3211  */
3212 static ssize_t
3213 _ctl_diag_trigger_event_show(struct device *cdev,
3214         struct device_attribute *attr, char *buf)
3215 {
3216         struct Scsi_Host *shost = class_to_shost(cdev);
3217         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3218         unsigned long flags;
3219         ssize_t rc;
3220
3221         spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3222         rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
3223         memcpy(buf, &ioc->diag_trigger_event, rc);
3224         spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3225         return rc;
3226 }
3227
3228 /**
3229  * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
3230  * @cdev: pointer to embedded class device
3231  * @attr: ?
3232  * @buf: the buffer returned
3233  * @count: ?
3234  *
3235  * A sysfs 'read/write' shost attribute.
3236  */
3237 static ssize_t
3238 _ctl_diag_trigger_event_store(struct device *cdev,
3239         struct device_attribute *attr, const char *buf, size_t count)
3240
3241 {
3242         struct Scsi_Host *shost = class_to_shost(cdev);
3243         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3244         unsigned long flags;
3245         ssize_t sz;
3246
3247         spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3248         sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
3249         memset(&ioc->diag_trigger_event, 0,
3250             sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3251         memcpy(&ioc->diag_trigger_event, buf, sz);
3252         if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
3253                 ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
3254         spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3255         return sz;
3256 }
3257 static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
3258         _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
3259
3260
3261 /**
3262  * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
3263  * @cdev: pointer to embedded class device
3264  * @attr: ?
3265  * @buf: the buffer returned
3266  *
3267  * A sysfs 'read/write' shost attribute.
3268  */
3269 static ssize_t
3270 _ctl_diag_trigger_scsi_show(struct device *cdev,
3271         struct device_attribute *attr, char *buf)
3272 {
3273         struct Scsi_Host *shost = class_to_shost(cdev);
3274         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3275         unsigned long flags;
3276         ssize_t rc;
3277
3278         spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3279         rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
3280         memcpy(buf, &ioc->diag_trigger_scsi, rc);
3281         spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3282         return rc;
3283 }
3284
3285 /**
3286  * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
3287  * @cdev: pointer to embedded class device
3288  * @attr: ?
3289  * @buf: the buffer returned
3290  * @count: ?
3291  *
3292  * A sysfs 'read/write' shost attribute.
3293  */
3294 static ssize_t
3295 _ctl_diag_trigger_scsi_store(struct device *cdev,
3296         struct device_attribute *attr, const char *buf, size_t count)
3297 {
3298         struct Scsi_Host *shost = class_to_shost(cdev);
3299         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3300         unsigned long flags;
3301         ssize_t sz;
3302
3303         spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3304         sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
3305         memset(&ioc->diag_trigger_scsi, 0,
3306             sizeof(ioc->diag_trigger_scsi));
3307         memcpy(&ioc->diag_trigger_scsi, buf, sz);
3308         if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
3309                 ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
3310         spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3311         return sz;
3312 }
3313 static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
3314         _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
3315
3316
3317 /**
3318  * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
3319  * @cdev: pointer to embedded class device
3320  * @attr: ?
3321  * @buf: the buffer returned
3322  *
3323  * A sysfs 'read/write' shost attribute.
3324  */
3325 static ssize_t
3326 _ctl_diag_trigger_mpi_show(struct device *cdev,
3327         struct device_attribute *attr, char *buf)
3328 {
3329         struct Scsi_Host *shost = class_to_shost(cdev);
3330         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3331         unsigned long flags;
3332         ssize_t rc;
3333
3334         spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3335         rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
3336         memcpy(buf, &ioc->diag_trigger_mpi, rc);
3337         spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3338         return rc;
3339 }
3340
3341 /**
3342  * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
3343  * @cdev: pointer to embedded class device
3344  * @attr: ?
3345  * @buf: the buffer returned
3346  * @count: ?
3347  *
3348  * A sysfs 'read/write' shost attribute.
3349  */
3350 static ssize_t
3351 _ctl_diag_trigger_mpi_store(struct device *cdev,
3352         struct device_attribute *attr, const char *buf, size_t count)
3353 {
3354         struct Scsi_Host *shost = class_to_shost(cdev);
3355         struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3356         unsigned long flags;
3357         ssize_t sz;
3358
3359         spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3360         sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
3361         memset(&ioc->diag_trigger_mpi, 0,
3362             sizeof(ioc->diag_trigger_mpi));
3363         memcpy(&ioc->diag_trigger_mpi, buf, sz);
3364         if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
3365                 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
3366         spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3367         return sz;
3368 }
3369
3370 static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
3371         _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
3372
3373 /*********** diagnostic trigger support *** END *****************************/
3374
3375 /*****************************************/
3376
3377 struct device_attribute *mpt3sas_host_attrs[] = {
3378         &dev_attr_version_fw,
3379         &dev_attr_version_bios,
3380         &dev_attr_version_mpi,
3381         &dev_attr_version_product,
3382         &dev_attr_version_nvdata_persistent,
3383         &dev_attr_version_nvdata_default,
3384         &dev_attr_board_name,
3385         &dev_attr_board_assembly,
3386         &dev_attr_board_tracer,
3387         &dev_attr_io_delay,
3388         &dev_attr_device_delay,
3389         &dev_attr_logging_level,
3390         &dev_attr_fwfault_debug,
3391         &dev_attr_fw_queue_depth,
3392         &dev_attr_host_sas_address,
3393         &dev_attr_ioc_reset_count,
3394         &dev_attr_host_trace_buffer_size,
3395         &dev_attr_host_trace_buffer,
3396         &dev_attr_host_trace_buffer_enable,
3397         &dev_attr_reply_queue_count,
3398         &dev_attr_diag_trigger_master,
3399         &dev_attr_diag_trigger_event,
3400         &dev_attr_diag_trigger_scsi,
3401         &dev_attr_diag_trigger_mpi,
3402         &dev_attr_BRM_status,
3403         NULL,
3404 };
3405
3406 /* device attributes */
3407
3408 /**
3409  * _ctl_device_sas_address_show - sas address
3410  * @dev: pointer to embedded class device
3411  * @attr: ?
3412  * @buf: the buffer returned
3413  *
3414  * This is the sas address for the target
3415  *
3416  * A sysfs 'read-only' sdev attribute.
3417  */
3418 static ssize_t
3419 _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
3420         char *buf)
3421 {
3422         struct scsi_device *sdev = to_scsi_device(dev);
3423         struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3424
3425         return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
3426             (unsigned long long)sas_device_priv_data->sas_target->sas_address);
3427 }
3428 static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
3429
3430 /**
3431  * _ctl_device_handle_show - device handle
3432  * @dev: pointer to embedded class device
3433  * @attr: ?
3434  * @buf: the buffer returned
3435  *
3436  * This is the firmware assigned device handle
3437  *
3438  * A sysfs 'read-only' sdev attribute.
3439  */
3440 static ssize_t
3441 _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
3442         char *buf)
3443 {
3444         struct scsi_device *sdev = to_scsi_device(dev);
3445         struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3446
3447         return snprintf(buf, PAGE_SIZE, "0x%04x\n",
3448             sas_device_priv_data->sas_target->handle);
3449 }
3450 static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
3451
3452 /**
3453  * _ctl_device_ncq_prio_enable_show - send prioritized io commands to device
3454  * @dev: pointer to embedded device
3455  * @attr: ?
3456  * @buf: the buffer returned
3457  *
3458  * A sysfs 'read/write' sdev attribute, only works with SATA
3459  */
3460 static ssize_t
3461 _ctl_device_ncq_prio_enable_show(struct device *dev,
3462                                  struct device_attribute *attr, char *buf)
3463 {
3464         struct scsi_device *sdev = to_scsi_device(dev);
3465         struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3466
3467         return snprintf(buf, PAGE_SIZE, "%d\n",
3468                         sas_device_priv_data->ncq_prio_enable);
3469 }
3470
3471 static ssize_t
3472 _ctl_device_ncq_prio_enable_store(struct device *dev,
3473                                   struct device_attribute *attr,
3474                                   const char *buf, size_t count)
3475 {
3476         struct scsi_device *sdev = to_scsi_device(dev);
3477         struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3478         bool ncq_prio_enable = false;
3479
3480         if (kstrtobool(buf, &ncq_prio_enable))
3481                 return -EINVAL;
3482
3483         if (!scsih_ncq_prio_supp(sdev))
3484                 return -EINVAL;
3485
3486         sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
3487         return strlen(buf);
3488 }
3489 static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
3490                    _ctl_device_ncq_prio_enable_show,
3491                    _ctl_device_ncq_prio_enable_store);
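/*
 * Usage sketch for the per-device NCQ priority switch (illustrative
 * only, not part of the driver).  It assumes the usual scsi_device
 * sysfs layout, e.g. /sys/block/sdX/device/, passed in by the caller:
 *
 *      #include <stdio.h>
 *
 *      static int set_ncq_prio(const char *sdev_dir, int enable)
 *      {
 *              char path[256];
 *              FILE *f;
 *
 *              snprintf(path, sizeof(path), "%s/sas_ncq_prio_enable",
 *                       sdev_dir);
 *              f = fopen(path, "w");
 *              if (!f)
 *                      return -1;
 *              fprintf(f, "%d\n", enable);  // parsed with kstrtobool()
 *              return fclose(f);
 *      }
 *
 * The store returns -EINVAL when scsih_ncq_prio_supp() reports that the
 * target does not support NCQ priority, so the write is expected to
 * fail on anything other than capable SATA devices.
 */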
3492
3493 struct device_attribute *mpt3sas_dev_attrs[] = {
3494         &dev_attr_sas_address,
3495         &dev_attr_sas_device_handle,
3496         &dev_attr_sas_ncq_prio_enable,
3497         NULL,
3498 };
3499
3500 /* file operations table for mpt3ctl device */
3501 static const struct file_operations ctl_fops = {
3502         .owner = THIS_MODULE,
3503         .unlocked_ioctl = _ctl_ioctl,
3504         .poll = _ctl_poll,
3505         .fasync = _ctl_fasync,
3506 #ifdef CONFIG_COMPAT
3507         .compat_ioctl = _ctl_ioctl_compat,
3508 #endif
3509 };
3510
3511 /* file operations table for mpt2ctl device */
3512 static const struct file_operations ctl_gen2_fops = {
3513         .owner = THIS_MODULE,
3514         .unlocked_ioctl = _ctl_mpt2_ioctl,
3515         .poll = _ctl_poll,
3516         .fasync = _ctl_fasync,
3517 #ifdef CONFIG_COMPAT
3518         .compat_ioctl = _ctl_mpt2_ioctl_compat,
3519 #endif
3520 };
3521
3522 static struct miscdevice ctl_dev = {
3523         .minor  = MPT3SAS_MINOR,
3524         .name   = MPT3SAS_DEV_NAME,
3525         .fops   = &ctl_fops,
3526 };
3527
3528 static struct miscdevice gen2_ctl_dev = {
3529         .minor  = MPT2SAS_MINOR,
3530         .name   = MPT2SAS_DEV_NAME,
3531         .fops   = &ctl_gen2_fops,
3532 };
3533
3534 /**
3535  * mpt3sas_ctl_init - main entry point for ctl.
3536  * @hbas_to_enumerate: ?
3537  */
3538 void
3539 mpt3sas_ctl_init(ushort hbas_to_enumerate)
3540 {
3541         async_queue = NULL;
3542
3543         /* Don't register mpt3ctl ioctl device if
3544          * hbas_to_enumerate is one.
3545          */
3546         if (hbas_to_enumerate != 1)
3547                 if (misc_register(&ctl_dev) < 0)
3548                         pr_err("%s can't register misc device [minor=%d]\n",
3549                             MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
3550
3551         /* Don't register mpt2ctl ioctl device if
3552          * hbas_to_enumerate is two.
3553          */
3554         if (hbas_to_enumerate != 2)
3555                 if (misc_register(&gen2_ctl_dev) < 0)
3556                         pr_err("%s can't register misc device [minor=%d]\n",
3557                             MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
3558
3559         init_waitqueue_head(&ctl_poll_wait);
3560 }
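/*
 * The ctl interface is consumed from user space through the misc
 * character devices registered above.  Illustrative sketch of waiting
 * for a firmware event with poll() (not part of the driver; it assumes
 * a /dev/mpt3ctl node created for MPT3SAS_DEV_NAME and sufficient
 * privileges to open it):
 *
 *      #include <fcntl.h>
 *      #include <poll.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              struct pollfd pfd;
 *              int rc;
 *
 *              pfd.fd = open("/dev/mpt3ctl", O_RDWR);
 *              if (pfd.fd < 0)
 *                      return 1;
 *              pfd.events = POLLIN;
 *              rc = poll(&pfd, 1, -1);  // sleeps on ctl_poll_wait
 *              if (rc > 0 && (pfd.revents & POLLIN))
 *                      printf("firmware event available\n");
 *              close(pfd.fd);
 *              return 0;
 *      }
 */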
3561
3562 /**
3563  * mpt3sas_ctl_exit - exit point for ctl
3564  * @hbas_to_enumerate: ?
3565  */
3566 void
3567 mpt3sas_ctl_exit(ushort hbas_to_enumerate)
3568 {
3569         struct MPT3SAS_ADAPTER *ioc;
3570         int i;
3571
3572         list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
3573
3574                 /* free memory associated to diag buffers */
3575                 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
3576                         if (!ioc->diag_buffer[i])
3577                                 continue;
3578                         if (!(ioc->diag_buffer_status[i] &
3579                             MPT3_DIAG_BUFFER_IS_REGISTERED))
3580                                 continue;
3581                         if ((ioc->diag_buffer_status[i] &
3582                             MPT3_DIAG_BUFFER_IS_RELEASED))
3583                                 continue;
3584                         dma_free_coherent(&ioc->pdev->dev,
3585                                           ioc->diag_buffer_sz[i],
3586                                           ioc->diag_buffer[i],
3587                                           ioc->diag_buffer_dma[i]);
3588                         ioc->diag_buffer[i] = NULL;
3589                         ioc->diag_buffer_status[i] = 0;
3590                 }
3591
3592                 kfree(ioc->event_log);
3593         }
3594         if (hbas_to_enumerate != 1)
3595                 misc_deregister(&ctl_dev);
3596         if (hbas_to_enumerate != 2)
3597                 misc_deregister(&gen2_ctl_dev);
3598 }