Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
c8b84976 SR |
2 | * Copyright (C) 2001 Troy D. Armstrong IBM Corporation |
3 | * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation | |
4 | * | |
5 | * This module exists as an interface between a Linux secondary partition | |
6 | * running on an iSeries and the primary partition's Virtual Service | |
7 | * Processor (VSP) object. The VSP has final authority over powering on/off | |
8 | * all partitions in the iSeries. It also provides miscellaneous low-level | |
9 | * machine facility type operations. | |
10 | * | |
11 | * | |
12 | * This program is free software; you can redistribute it and/or modify | |
13 | * it under the terms of the GNU General Public License as published by | |
14 | * the Free Software Foundation; either version 2 of the License, or | |
15 | * (at your option) any later version. | |
16 | * | |
17 | * This program is distributed in the hope that it will be useful, | |
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
20 | * GNU General Public License for more details. | |
21 | * | |
22 | * You should have received a copy of the GNU General Public License | |
23 | * along with this program; if not, write to the Free Software | |
24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
25 | */ | |
1da177e4 LT |
26 | |
27 | #include <linux/types.h> | |
28 | #include <linux/errno.h> | |
29 | #include <linux/kernel.h> | |
30 | #include <linux/init.h> | |
31 | #include <linux/completion.h> | |
32 | #include <linux/delay.h> | |
33 | #include <linux/dma-mapping.h> | |
34 | #include <linux/bcd.h> | |
143a1dec | 35 | #include <linux/rtc.h> |
1da177e4 LT |
36 | |
37 | #include <asm/time.h> | |
38 | #include <asm/uaccess.h> | |
d0e8e291 | 39 | #include <asm/paca.h> |
426c1a11 | 40 | #include <asm/abs_addr.h> |
d9523aa1 | 41 | #include <asm/firmware.h> |
b4206778 | 42 | #include <asm/iseries/vio.h> |
bbc8b628 | 43 | #include <asm/iseries/mf.h> |
15b17189 | 44 | #include <asm/iseries/hv_lp_config.h> |
8875ccfb | 45 | #include <asm/iseries/it_lp_queue.h> |
1da177e4 | 46 | |
c8b84976 SR |
47 | #include "setup.h" |
48 | ||
260de22f | 49 | static int mf_initialized; |
c8b84976 | 50 | |
1da177e4 LT |
51 | /* |
52 | * This is the structure layout for the Machine Facilities LPAR event |
53 | * flows. | |
54 | */ | |
55 | struct vsp_cmd_data { | |
56 | u64 token; | |
57 | u16 cmd; | |
58 | HvLpIndex lp_index; | |
59 | u8 result_code; | |
60 | u32 reserved; | |
61 | union { | |
62 | u64 state; /* GetStateOut */ | |
63 | u64 ipl_type; /* GetIplTypeOut, Function02SelectIplTypeIn */ | |
64 | u64 ipl_mode; /* GetIplModeOut, Function02SelectIplModeIn */ | |
65 | u64 page[4]; /* GetSrcHistoryIn */ | |
66 | u64 flag; /* GetAutoIplWhenPrimaryIplsOut, | |
67 | SetAutoIplWhenPrimaryIplsIn, | |
68 | WhiteButtonPowerOffIn, | |
69 | Function08FastPowerOffIn, | |
70 | IsSpcnRackPowerIncompleteOut */ | |
71 | struct { | |
72 | u64 token; | |
73 | u64 address_type; | |
74 | u64 side; | |
75 | u32 length; | |
76 | u32 offset; | |
77 | } kern; /* SetKernelImageIn, GetKernelImageIn, | |
78 | SetKernelCmdLineIn, GetKernelCmdLineIn */ | |
79 | u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */ | |
80 | u8 reserved[80]; | |
81 | } sub_data; | |
82 | }; | |
83 | ||
84 | struct vsp_rsp_data { | |
85 | struct completion com; | |
86 | struct vsp_cmd_data *response; | |
87 | }; | |
88 | ||
89 | struct alloc_data { | |
90 | u16 size; | |
91 | u16 type; | |
92 | u32 count; | |
93 | u16 reserved1; | |
94 | u8 reserved2; | |
95 | HvLpIndex target_lp; | |
96 | }; | |
97 | ||
98 | struct ce_msg_data; | |
99 | ||
100 | typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp); | |
101 | ||
102 | struct ce_msg_comp_data { | |
103 | ce_msg_comp_hdlr handler; | |
104 | void *token; | |
105 | }; | |
106 | ||
107 | struct ce_msg_data { | |
108 | u8 ce_msg[12]; | |
109 | char reserved[4]; | |
110 | struct ce_msg_comp_data *completion; | |
111 | }; | |
112 | ||
113 | struct io_mf_lp_event { | |
114 | struct HvLpEvent hp_lp_event; | |
115 | u16 subtype_result_code; | |
116 | u16 reserved1; | |
117 | u32 reserved2; | |
118 | union { | |
119 | struct alloc_data alloc; | |
120 | struct ce_msg_data ce_msg; | |
121 | struct vsp_cmd_data vsp_cmd; | |
122 | } data; | |
123 | }; | |
124 | ||
125 | #define subtype_data(a, b, c, d) \ | |
126 | (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) | |
127 | ||
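The subtype_data() macro above packs four ASCII characters into one 32-bit subtype tag, most significant byte first. A stand-alone illustration (not part of the driver) of the value produced for the 'M', 'F', 'V', 'I' tag later used by signal_vsp_instruction():

```c
#include <stdio.h>
#include <stdint.h>

/* Same packing as the driver's subtype_data(): a<<24 + b<<16 + c<<8 + d */
#define subtype_data(a, b, c, d) \
	(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))

int main(void)
{
	uint32_t vi = subtype_data('M', 'F', 'V', 'I');

	/* Prints 4d465649 -- the bytes 'M' 'F' 'V' 'I' in order */
	printf("%08x\n", vi);
	return 0;
}
```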
128 | /* | |
129 | * All outgoing event traffic is kept on a FIFO queue. The head |
130 | * pointer points to the event that is currently outstanding, and new |
131 | * requests are appended at the tail. We also keep a number of |
132 | * preallocated pending events so that we can operate very early in |
133 | * the boot sequence (before kmalloc is ready). |
134 | */ | |
135 | struct pending_event { | |
136 | struct pending_event *next; | |
137 | struct io_mf_lp_event event; | |
138 | MFCompleteHandler hdlr; | |
139 | char dma_data[72]; | |
140 | unsigned dma_data_length; | |
141 | unsigned remote_address; | |
142 | }; | |
143 | static spinlock_t pending_event_spinlock; | |
144 | static struct pending_event *pending_event_head; | |
145 | static struct pending_event *pending_event_tail; | |
146 | static struct pending_event *pending_event_avail; | |
260de22f ME |
147 | #define PENDING_EVENT_PREALLOC_LEN 16 |
148 | static struct pending_event pending_event_prealloc[PENDING_EVENT_PREALLOC_LEN]; | |
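The comment above describes the queue discipline: a singly linked FIFO of pending events plus a free list seeded from a static array, so requests can be queued before kmalloc is available. A small user-space sketch of that discipline (names and payload are illustrative, not the driver's; the real code additionally guards head, tail and avail with pending_event_spinlock):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pe {                       /* stand-in for struct pending_event */
	struct pe *next;
	int payload;
};

#define PREALLOC_LEN 16

static struct pe prealloc[PREALLOC_LEN];  /* usable before any allocator runs */
static struct pe *head, *tail, *avail;    /* FIFO head/tail and free list */

/* Put an event back on the free list (cf. free_pending_event) */
static void pe_free(struct pe *ev)
{
	ev->next = avail;
	avail = ev;
}

/* Take an event from the free list, falling back to malloc (cf. new_pending_event) */
static struct pe *pe_new(void)
{
	struct pe *ev = avail;

	if (ev)
		avail = ev->next;
	else
		ev = malloc(sizeof(*ev));
	if (ev)
		memset(ev, 0, sizeof(*ev));
	return ev;
}

/* Append to the FIFO; return 1 if it became head, i.e. must be issued now */
static int pe_enqueue(struct pe *ev)
{
	ev->next = NULL;
	if (head)
		tail->next = ev;
	else
		head = ev;
	tail = ev;
	return head == ev;
}

int main(void)
{
	int i;

	for (i = 0; i < PREALLOC_LEN; i++)
		pe_free(&prealloc[i]);

	struct pe *a = pe_new(), *b = pe_new();
	a->payload = 1;
	b->payload = 2;
	printf("a issued now: %d\n", pe_enqueue(a));  /* 1: queue was empty */
	printf("b issued now: %d\n", pe_enqueue(b));  /* 0: waits behind a */
	return 0;
}
```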
1da177e4 LT |
149 | |
150 | /* | |
151 | * Put a pending event onto the available queue, so it can get reused. | |
152 | * Attention! You must hold the pending_event_spinlock before calling! |
153 | */ | |
154 | static void free_pending_event(struct pending_event *ev) | |
155 | { | |
156 | if (ev != NULL) { | |
157 | ev->next = pending_event_avail; | |
158 | pending_event_avail = ev; | |
159 | } | |
160 | } | |
161 | ||
162 | /* | |
163 | * Enqueue the outbound event onto the queue. If the queue was |
164 | * empty to begin with, we must also issue it via the Hypervisor |
165 | * interface. There is a section of code below that touches the |
166 | * head pointer without the protection of the pending_event_spinlock. |
167 | * This is OK, because we know that nobody else will be modifying |
168 | * the head pointer when we do this. |
169 | */ | |
170 | static int signal_event(struct pending_event *ev) | |
171 | { | |
172 | int rc = 0; | |
173 | unsigned long flags; | |
174 | int go = 1; | |
175 | struct pending_event *ev1; | |
176 | HvLpEvent_Rc hv_rc; | |
177 | ||
178 | /* enqueue the event */ | |
179 | if (ev != NULL) { | |
180 | ev->next = NULL; | |
181 | spin_lock_irqsave(&pending_event_spinlock, flags); | |
182 | if (pending_event_head == NULL) | |
183 | pending_event_head = ev; | |
184 | else { | |
185 | go = 0; | |
186 | pending_event_tail->next = ev; | |
187 | } | |
188 | pending_event_tail = ev; | |
189 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | |
190 | } | |
191 | ||
192 | /* send the event */ | |
193 | while (go) { | |
194 | go = 0; | |
195 | ||
196 | /* any DMA data to send beforehand? */ | |
197 | if (pending_event_head->dma_data_length > 0) | |
198 | HvCallEvent_dmaToSp(pending_event_head->dma_data, | |
199 | pending_event_head->remote_address, | |
200 | pending_event_head->dma_data_length, | |
201 | HvLpDma_Direction_LocalToRemote); | |
202 | ||
203 | hv_rc = HvCallEvent_signalLpEvent( | |
204 | &pending_event_head->event.hp_lp_event); | |
205 | if (hv_rc != HvLpEvent_Rc_Good) { | |
206 | printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() " | |
207 | "failed with %d\n", (int)hv_rc); | |
208 | ||
209 | spin_lock_irqsave(&pending_event_spinlock, flags); | |
210 | ev1 = pending_event_head; | |
211 | pending_event_head = pending_event_head->next; | |
212 | if (pending_event_head != NULL) | |
213 | go = 1; | |
214 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | |
215 | ||
216 | if (ev1 == ev) | |
217 | rc = -EIO; | |
218 | else if (ev1->hdlr != NULL) | |
219 | (*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO); | |
220 | ||
221 | spin_lock_irqsave(&pending_event_spinlock, flags); | |
222 | free_pending_event(ev1); | |
223 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | |
224 | } | |
225 | } | |
226 | ||
227 | return rc; | |
228 | } | |
229 | ||
230 | /* | |
231 | * Allocate a new pending_event structure, and initialize it. | |
232 | */ | |
233 | static struct pending_event *new_pending_event(void) | |
234 | { | |
235 | struct pending_event *ev = NULL; | |
236 | HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex(); | |
237 | unsigned long flags; | |
238 | struct HvLpEvent *hev; | |
239 | ||
240 | spin_lock_irqsave(&pending_event_spinlock, flags); | |
241 | if (pending_event_avail != NULL) { | |
242 | ev = pending_event_avail; | |
243 | pending_event_avail = pending_event_avail->next; | |
244 | } | |
245 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | |
246 | if (ev == NULL) { | |
247 | ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC); | |
248 | if (ev == NULL) { | |
249 | printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n", | |
250 | sizeof(struct pending_event)); | |
251 | return NULL; | |
252 | } | |
253 | } | |
254 | memset(ev, 0, sizeof(struct pending_event)); | |
255 | hev = &ev->event.hp_lp_event; | |
677f8c0d | 256 | hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT; |
1da177e4 LT |
257 | hev->xType = HvLpEvent_Type_MachineFac; |
258 | hev->xSourceLp = HvLpConfig_getLpIndex(); | |
259 | hev->xTargetLp = primary_lp; | |
260 | hev->xSizeMinus1 = sizeof(ev->event) - 1; | |
261 | hev->xRc = HvLpEvent_Rc_Good; | |
262 | hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp, | |
263 | HvLpEvent_Type_MachineFac); | |
264 | hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp, | |
265 | HvLpEvent_Type_MachineFac); | |
266 | ||
267 | return ev; | |
268 | } | |
269 | ||
270 | static int signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd) | |
271 | { | |
272 | struct pending_event *ev = new_pending_event(); | |
273 | int rc; | |
274 | struct vsp_rsp_data response; | |
275 | ||
276 | if (ev == NULL) | |
277 | return -ENOMEM; | |
278 | ||
279 | init_completion(&response.com); | |
280 | response.response = vsp_cmd; | |
281 | ev->event.hp_lp_event.xSubtype = 6; | |
282 | ev->event.hp_lp_event.x.xSubtypeData = | |
283 | subtype_data('M', 'F', 'V', 'I'); | |
284 | ev->event.data.vsp_cmd.token = (u64)&response; | |
285 | ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd; | |
286 | ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex(); | |
287 | ev->event.data.vsp_cmd.result_code = 0xFF; | |
288 | ev->event.data.vsp_cmd.reserved = 0; | |
289 | memcpy(&(ev->event.data.vsp_cmd.sub_data), | |
290 | &(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data)); | |
291 | mb(); | |
292 | ||
293 | rc = signal_event(ev); | |
294 | if (rc == 0) | |
295 | wait_for_completion(&response.com); | |
296 | return rc; | |
297 | } | |
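signal_vsp_instruction() shows the synchronous request pattern used throughout this file: a stack-allocated response structure containing a completion is passed by address as the opaque token, and the acknowledgement path (handle_ack, subtype 6) casts the token back, copies the result and completes it. A user-space model of that pattern, with pthread primitives standing in for the kernel's struct completion (purely illustrative):

```c
#include <pthread.h>
#include <stdio.h>

struct response {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	int result_code;		/* filled in by the "ack" side */
};

static void complete_response(void *token, int result_code)
{
	struct response *rsp = token;	/* cf. handle_ack casting the token back */

	pthread_mutex_lock(&rsp->lock);
	rsp->result_code = result_code;
	rsp->done = 1;
	pthread_cond_signal(&rsp->cond);
	pthread_mutex_unlock(&rsp->lock);
}

static void *ack_thread(void *token)
{
	complete_response(token, 0);	/* pretend the VSP acked with success */
	return NULL;
}

int main(void)
{
	struct response rsp = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, ack_thread, &rsp);	/* "signal_event(ev)" */

	pthread_mutex_lock(&rsp.lock);			/* "wait_for_completion()" */
	while (!rsp.done)
		pthread_cond_wait(&rsp.cond, &rsp.lock);
	pthread_mutex_unlock(&rsp.lock);

	printf("result_code = %d\n", rsp.result_code);
	pthread_join(t, NULL);
	return 0;
}
```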
298 | ||
299 | ||
300 | /* | |
301 | * Send a 12-byte CE message to the primary partition VSP object | |
302 | */ | |
303 | static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion) | |
304 | { | |
305 | struct pending_event *ev = new_pending_event(); | |
306 | ||
307 | if (ev == NULL) | |
308 | return -ENOMEM; | |
309 | ||
310 | ev->event.hp_lp_event.xSubtype = 0; | |
311 | ev->event.hp_lp_event.x.xSubtypeData = | |
312 | subtype_data('M', 'F', 'C', 'E'); | |
313 | memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12); | |
314 | ev->event.data.ce_msg.completion = completion; | |
315 | return signal_event(ev); | |
316 | } | |
317 | ||
318 | /* | |
319 | * Send a 12-byte CE message (with no data) to the primary partition VSP object | |
320 | */ | |
321 | static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion) | |
322 | { | |
323 | u8 ce_msg[12]; | |
324 | ||
325 | memset(ce_msg, 0, sizeof(ce_msg)); | |
326 | ce_msg[3] = ce_op; | |
327 | return signal_ce_msg(ce_msg, completion); | |
328 | } | |
329 | ||
330 | /* | |
331 | * Send a 12-byte CE message and DMA data to the primary partition VSP object | |
332 | */ | |
333 | static int dma_and_signal_ce_msg(char *ce_msg, | |
334 | struct ce_msg_comp_data *completion, void *dma_data, | |
335 | unsigned dma_data_length, unsigned remote_address) | |
336 | { | |
337 | struct pending_event *ev = new_pending_event(); | |
338 | ||
339 | if (ev == NULL) | |
340 | return -ENOMEM; | |
341 | ||
342 | ev->event.hp_lp_event.xSubtype = 0; | |
343 | ev->event.hp_lp_event.x.xSubtypeData = | |
344 | subtype_data('M', 'F', 'C', 'E'); | |
345 | memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12); | |
346 | ev->event.data.ce_msg.completion = completion; | |
347 | memcpy(ev->dma_data, dma_data, dma_data_length); | |
348 | ev->dma_data_length = dma_data_length; | |
349 | ev->remote_address = remote_address; | |
350 | return signal_event(ev); | |
351 | } | |
352 | ||
353 | /* | |
354 | * Initiate a graceful shutdown of Linux by sending the init |
355 | * process a SIGINT signal. If that fails, we fall back to |
356 | * forcing the partition off in a not-so-nice manner. |
358 | */ | |
359 | static int shutdown(void) | |
360 | { | |
9ec52099 | 361 | int rc = kill_cad_pid(SIGINT, 1); |
1da177e4 LT |
362 | |
363 | if (rc) { | |
364 | printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), " | |
365 | "hard shutdown commencing\n", rc); | |
366 | mf_power_off(); | |
367 | } else | |
368 | printk(KERN_INFO "mf.c: init has been successfully notified " | |
369 | "to proceed with shutdown\n"); | |
370 | return rc; | |
371 | } | |
372 | ||
373 | /* | |
374 | * The primary partition VSP object is sending us a new | |
375 | * event flow. Handle it... | |
376 | */ | |
377 | static void handle_int(struct io_mf_lp_event *event) | |
378 | { | |
379 | struct ce_msg_data *ce_msg_data; | |
380 | struct ce_msg_data *pce_msg_data; | |
381 | unsigned long flags; | |
382 | struct pending_event *pev; | |
383 | ||
384 | /* ack the interrupt */ | |
385 | event->hp_lp_event.xRc = HvLpEvent_Rc_Good; | |
386 | HvCallEvent_ackLpEvent(&event->hp_lp_event); | |
387 | ||
388 | /* process interrupt */ | |
389 | switch (event->hp_lp_event.xSubtype) { | |
390 | case 0: /* CE message */ | |
391 | ce_msg_data = &event->data.ce_msg; | |
392 | switch (ce_msg_data->ce_msg[3]) { | |
393 | case 0x5B: /* power control notification */ | |
394 | if ((ce_msg_data->ce_msg[5] & 0x20) != 0) { | |
395 | printk(KERN_INFO "mf.c: Commencing partition shutdown\n"); | |
396 | if (shutdown() == 0) | |
397 | signal_ce_msg_simple(0xDB, NULL); | |
398 | } | |
399 | break; | |
400 | case 0xC0: /* get time */ | |
401 | spin_lock_irqsave(&pending_event_spinlock, flags); | |
402 | pev = pending_event_head; | |
403 | if (pev != NULL) | |
404 | pending_event_head = pending_event_head->next; | |
405 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | |
406 | if (pev == NULL) | |
407 | break; | |
408 | pce_msg_data = &pev->event.data.ce_msg; | |
409 | if (pce_msg_data->ce_msg[3] != 0x40) | |
410 | break; | |
411 | if (pce_msg_data->completion != NULL) { | |
412 | ce_msg_comp_hdlr handler = | |
413 | pce_msg_data->completion->handler; | |
414 | void *token = pce_msg_data->completion->token; | |
415 | ||
416 | if (handler != NULL) | |
417 | (*handler)(token, ce_msg_data); | |
418 | } | |
419 | spin_lock_irqsave(&pending_event_spinlock, flags); | |
420 | free_pending_event(pev); | |
421 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | |
422 | /* send next waiting event */ | |
423 | if (pending_event_head != NULL) | |
424 | signal_event(NULL); | |
425 | break; | |
426 | } | |
427 | break; | |
428 | case 1: /* IT sys shutdown */ | |
429 | printk(KERN_INFO "mf.c: Commencing system shutdown\n"); | |
430 | shutdown(); | |
431 | break; | |
432 | } | |
433 | } | |
434 | ||
435 | /* | |
436 | * The primary partition VSP object is acknowledging the receipt | |
437 | * of a flow we sent to it. If there are other flows queued |
438 | * up, we must send another one now... | |
439 | */ | |
440 | static void handle_ack(struct io_mf_lp_event *event) | |
441 | { | |
442 | unsigned long flags; | |
443 | struct pending_event *two = NULL; | |
444 | unsigned long free_it = 0; | |
445 | struct ce_msg_data *ce_msg_data; | |
446 | struct ce_msg_data *pce_msg_data; | |
447 | struct vsp_rsp_data *rsp; | |
448 | ||
449 | /* handle current event */ | |
450 | if (pending_event_head == NULL) { | |
451 | printk(KERN_ERR "mf.c: stack empty for receiving ack\n"); | |
452 | return; | |
453 | } | |
454 | ||
455 | switch (event->hp_lp_event.xSubtype) { | |
456 | case 0: /* CE msg */ | |
457 | ce_msg_data = &event->data.ce_msg; | |
458 | if (ce_msg_data->ce_msg[3] != 0x40) { | |
459 | free_it = 1; | |
460 | break; | |
461 | } | |
462 | if (ce_msg_data->ce_msg[2] == 0) | |
463 | break; | |
464 | free_it = 1; | |
465 | pce_msg_data = &pending_event_head->event.data.ce_msg; | |
466 | if (pce_msg_data->completion != NULL) { | |
467 | ce_msg_comp_hdlr handler = | |
468 | pce_msg_data->completion->handler; | |
469 | void *token = pce_msg_data->completion->token; | |
470 | ||
471 | if (handler != NULL) | |
472 | (*handler)(token, ce_msg_data); | |
473 | } | |
474 | break; | |
475 | case 4: /* allocate */ | |
476 | case 5: /* deallocate */ | |
477 | if (pending_event_head->hdlr != NULL) | |
478 | (*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count); | |
479 | free_it = 1; | |
480 | break; | |
481 | case 6: | |
482 | free_it = 1; | |
483 | rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token; | |
484 | if (rsp == NULL) { | |
485 | printk(KERN_ERR "mf.c: no rsp\n"); | |
486 | break; | |
487 | } | |
488 | if (rsp->response != NULL) | |
489 | memcpy(rsp->response, &event->data.vsp_cmd, | |
490 | sizeof(event->data.vsp_cmd)); | |
491 | complete(&rsp->com); | |
492 | break; | |
493 | } | |
494 | ||
495 | /* remove from queue */ | |
496 | spin_lock_irqsave(&pending_event_spinlock, flags); | |
497 | if ((pending_event_head != NULL) && (free_it == 1)) { | |
498 | struct pending_event *oldHead = pending_event_head; | |
499 | ||
500 | pending_event_head = pending_event_head->next; | |
501 | two = pending_event_head; | |
502 | free_pending_event(oldHead); | |
503 | } | |
504 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | |
505 | ||
506 | /* send next waiting event */ | |
507 | if (two != NULL) | |
508 | signal_event(NULL); | |
509 | } | |
510 | ||
511 | /* | |
512 | * This is the generic event handler we are registering with | |
513 | * the Hypervisor. Ensure the flow is for us, and then |
514 | * parse it enough to know whether it is an interrupt or an |
515 | * acknowledgement. |
516 | */ | |
35a84c2f | 517 | static void hv_handler(struct HvLpEvent *event) |
1da177e4 LT |
518 | { |
519 | if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) { | |
677f8c0d | 520 | if (hvlpevent_is_ack(event)) |
1da177e4 | 521 | handle_ack((struct io_mf_lp_event *)event); |
677f8c0d | 522 | else |
1da177e4 | 523 | handle_int((struct io_mf_lp_event *)event); |
1da177e4 LT |
524 | } else |
525 | printk(KERN_ERR "mf.c: alien event received\n"); | |
526 | } | |
527 | ||
528 | /* | |
529 | * Global kernel interface to allocate and seed events into the | |
530 | * Hypervisor. | |
531 | */ | |
532 | void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type, | |
533 | unsigned size, unsigned count, MFCompleteHandler hdlr, | |
534 | void *user_token) | |
535 | { | |
536 | struct pending_event *ev = new_pending_event(); | |
537 | int rc; | |
538 | ||
539 | if (ev == NULL) { | |
540 | rc = -ENOMEM; | |
541 | } else { | |
542 | ev->event.hp_lp_event.xSubtype = 4; | |
543 | ev->event.hp_lp_event.xCorrelationToken = (u64)user_token; | |
544 | ev->event.hp_lp_event.x.xSubtypeData = | |
545 | subtype_data('M', 'F', 'M', 'A'); | |
546 | ev->event.data.alloc.target_lp = target_lp; | |
547 | ev->event.data.alloc.type = type; | |
548 | ev->event.data.alloc.size = size; | |
549 | ev->event.data.alloc.count = count; | |
550 | ev->hdlr = hdlr; | |
551 | rc = signal_event(ev); | |
552 | } | |
553 | if ((rc != 0) && (hdlr != NULL)) | |
554 | (*hdlr)(user_token, rc); | |
555 | } | |
556 | EXPORT_SYMBOL(mf_allocate_lp_events); | |
557 | ||
558 | /* | |
559 | * Global kernel interface to unseed and deallocate events already in | |
560 | * the Hypervisor. |
561 | */ | |
562 | void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type, | |
563 | unsigned count, MFCompleteHandler hdlr, void *user_token) | |
564 | { | |
565 | struct pending_event *ev = new_pending_event(); | |
566 | int rc; | |
567 | ||
568 | if (ev == NULL) | |
569 | rc = -ENOMEM; | |
570 | else { | |
571 | ev->event.hp_lp_event.xSubtype = 5; | |
572 | ev->event.hp_lp_event.xCorrelationToken = (u64)user_token; | |
573 | ev->event.hp_lp_event.x.xSubtypeData = | |
574 | subtype_data('M', 'F', 'M', 'D'); | |
575 | ev->event.data.alloc.target_lp = target_lp; | |
576 | ev->event.data.alloc.type = type; | |
577 | ev->event.data.alloc.count = count; | |
578 | ev->hdlr = hdlr; | |
579 | rc = signal_event(ev); | |
580 | } | |
581 | if ((rc != 0) && (hdlr != NULL)) | |
582 | (*hdlr)(user_token, rc); | |
583 | } | |
584 | EXPORT_SYMBOL(mf_deallocate_lp_events); | |
585 | ||
586 | /* | |
587 | * Global kernel interface to tell the VSP object in the primary | |
588 | * partition to power this partition off. | |
589 | */ | |
590 | void mf_power_off(void) | |
591 | { | |
592 | printk(KERN_INFO "mf.c: Down it goes...\n"); | |
593 | signal_ce_msg_simple(0x4d, NULL); | |
594 | for (;;) | |
595 | ; | |
596 | } | |
597 | ||
598 | /* | |
599 | * Global kernel interface to tell the VSP object in the primary | |
600 | * partition to reboot this partition. | |
601 | */ | |
a9ea2101 | 602 | void mf_reboot(char *cmd) |
1da177e4 LT |
603 | { |
604 | printk(KERN_INFO "mf.c: Preparing to bounce...\n"); | |
605 | signal_ce_msg_simple(0x4e, NULL); | |
606 | for (;;) | |
607 | ; | |
608 | } | |
609 | ||
610 | /* | |
611 | * Display a single word SRC onto the VSP control panel. | |
612 | */ | |
613 | void mf_display_src(u32 word) | |
614 | { | |
615 | u8 ce[12]; | |
616 | ||
617 | memset(ce, 0, sizeof(ce)); | |
618 | ce[3] = 0x4a; | |
619 | ce[7] = 0x01; | |
620 | ce[8] = word >> 24; | |
621 | ce[9] = word >> 16; | |
622 | ce[10] = word >> 8; | |
623 | ce[11] = word; | |
624 | signal_ce_msg(ce, NULL); | |
625 | } | |
626 | ||
627 | /* | |
628 | * Display a single word SRC of the form "PROGXXXX" on the VSP control panel. | |
629 | */ | |
260de22f | 630 | static __init void mf_display_progress_src(u16 value) |
1da177e4 LT |
631 | { |
632 | u8 ce[12]; | |
633 | u8 src[72]; | |
634 | ||
635 | memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12); | |
636 | memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00" | |
637 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | |
638 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | |
639 | "\x00\x00\x00\x00PROGxxxx ", | |
640 | 72); | |
641 | src[6] = value >> 8; | |
642 | src[7] = value & 255; | |
643 | src[44] = "0123456789ABCDEF"[(value >> 12) & 15]; | |
644 | src[45] = "0123456789ABCDEF"[(value >> 8) & 15]; | |
645 | src[46] = "0123456789ABCDEF"[(value >> 4) & 15]; | |
646 | src[47] = "0123456789ABCDEF"[value & 15]; | |
647 | dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024); | |
648 | } | |
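mf_display_progress_src() builds the panel text by patching the "PROGxxxx" template: the raw 16-bit value goes into src[6..7] and its four hex digits, rendered via a lookup string, replace the "xxxx" at src[44..47]. A minimal stand-alone illustration of that digit extraction (hypothetical helper, not in the driver):

```c
#include <stdio.h>
#include <stdint.h>

/* Render a 16-bit progress code the way mf_display_progress_src() patches
 * the "PROGxxxx" template: one hex digit per nibble, most significant first. */
static void prog_src(uint16_t value, char out[9])
{
	static const char hex[] = "0123456789ABCDEF";

	out[0] = 'P'; out[1] = 'R'; out[2] = 'O'; out[3] = 'G';
	out[4] = hex[(value >> 12) & 15];
	out[5] = hex[(value >> 8) & 15];
	out[6] = hex[(value >> 4) & 15];
	out[7] = hex[value & 15];
	out[8] = '\0';
}

int main(void)
{
	char buf[9];

	prog_src(0x3A7F, buf);
	printf("%s\n", buf);	/* PROG3A7F */
	return 0;
}
```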
649 | ||
650 | /* | |
651 | * Clear the VSP control panel. Used to "erase" an SRC that was | |
652 | * previously displayed. | |
653 | */ | |
260de22f | 654 | static void mf_clear_src(void) |
1da177e4 LT |
655 | { |
656 | signal_ce_msg_simple(0x4b, NULL); | |
657 | } | |
658 | ||
260de22f ME |
659 | void __init mf_display_progress(u16 value) |
660 | { | |
4bd174fe | 661 | if (!mf_initialized) |
260de22f ME |
662 | return; |
663 | ||
664 | if (0xFFFF == value) | |
665 | mf_clear_src(); | |
666 | else | |
667 | mf_display_progress_src(value); | |
668 | } | |
669 | ||
1da177e4 LT |
670 | /* |
671 | * Initialize the Machine Facilities LPAR event handling. |
672 | */ | |
260de22f | 673 | void __init mf_init(void) |
1da177e4 LT |
674 | { |
675 | int i; | |
676 | ||
1da177e4 | 677 | spin_lock_init(&pending_event_spinlock); |
260de22f ME |
678 | |
679 | for (i = 0; i < PENDING_EVENT_PREALLOC_LEN; i++) | |
1da177e4 | 680 | free_pending_event(&pending_event_prealloc[i]); |
260de22f | 681 | |
1da177e4 LT |
682 | HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler); |
683 | ||
684 | /* virtual continue ack */ | |
685 | signal_ce_msg_simple(0x57, NULL); | |
686 | ||
260de22f ME |
687 | mf_initialized = 1; |
688 | mb(); | |
689 | ||
1da177e4 LT |
690 | printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities " |
691 | "initialized\n"); | |
692 | } | |
693 | ||
694 | struct rtc_time_data { | |
695 | struct completion com; | |
696 | struct ce_msg_data ce_msg; | |
697 | int rc; | |
698 | }; | |
699 | ||
700 | static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg) | |
701 | { | |
702 | struct rtc_time_data *rtc = token; | |
703 | ||
704 | memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg)); | |
705 | rtc->rc = 0; | |
706 | complete(&rtc->com); | |
707 | } | |
708 | ||
00611c5c ME |
709 | static int mf_set_rtc(struct rtc_time *tm) |
710 | { | |
711 | char ce_time[12]; | |
712 | u8 day, mon, hour, min, sec, y1, y2; | |
713 | unsigned year; | |
714 | ||
715 | year = 1900 + tm->tm_year; | |
716 | y1 = year / 100; | |
717 | y2 = year % 100; | |
718 | ||
719 | sec = tm->tm_sec; | |
720 | min = tm->tm_min; | |
721 | hour = tm->tm_hour; | |
722 | day = tm->tm_mday; | |
723 | mon = tm->tm_mon + 1; | |
724 | ||
725 | BIN_TO_BCD(sec); | |
726 | BIN_TO_BCD(min); | |
727 | BIN_TO_BCD(hour); | |
728 | BIN_TO_BCD(mon); | |
729 | BIN_TO_BCD(day); | |
730 | BIN_TO_BCD(y1); | |
731 | BIN_TO_BCD(y2); | |
732 | ||
733 | memset(ce_time, 0, sizeof(ce_time)); | |
734 | ce_time[3] = 0x41; | |
735 | ce_time[4] = y1; | |
736 | ce_time[5] = y2; | |
737 | ce_time[6] = sec; | |
738 | ce_time[7] = min; | |
739 | ce_time[8] = hour; | |
740 | ce_time[10] = day; | |
741 | ce_time[11] = mon; | |
742 | ||
743 | return signal_ce_msg(ce_time, NULL); | |
744 | } | |
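mf_set_rtc() converts each time field to binary-coded decimal before placing it in the CE message: each decimal digit occupies one nibble, so for example 59 becomes 0x59. A small sketch of the conversion the BIN_TO_BCD/BCD_TO_BIN macros perform (stand-alone helpers, assuming two-digit values):

```c
#include <stdio.h>
#include <stdint.h>

/* Two-digit binary <-> BCD, as done by the BIN_TO_BCD/BCD_TO_BIN macros */
static uint8_t bin2bcd(uint8_t val)
{
	return ((val / 10) << 4) | (val % 10);
}

static uint8_t bcd2bin(uint8_t val)
{
	return (val >> 4) * 10 + (val & 0x0f);
}

int main(void)
{
	printf("59 -> 0x%02x\n", bin2bcd(59));      /* 0x59 */
	printf("0x23 -> %d\n", bcd2bin(0x23));      /* 23 */
	return 0;
}
```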
745 | ||
d0e8e291 | 746 | static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm) |
1da177e4 | 747 | { |
1da177e4 LT |
748 | tm->tm_wday = 0; |
749 | tm->tm_yday = 0; | |
750 | tm->tm_isdst = 0; | |
d0e8e291 | 751 | if (rc) { |
1da177e4 LT |
752 | tm->tm_sec = 0; |
753 | tm->tm_min = 0; | |
754 | tm->tm_hour = 0; | |
755 | tm->tm_mday = 15; | |
756 | tm->tm_mon = 5; | |
757 | tm->tm_year = 52; | |
d0e8e291 | 758 | return rc; |
1da177e4 LT |
759 | } |
760 | ||
d0e8e291 SR |
761 | if ((ce_msg[2] == 0xa9) || |
762 | (ce_msg[2] == 0xaf)) { | |
1da177e4 LT |
763 | /* TOD clock is not set */ |
764 | tm->tm_sec = 1; | |
765 | tm->tm_min = 1; | |
766 | tm->tm_hour = 1; | |
767 | tm->tm_mday = 10; | |
768 | tm->tm_mon = 8; | |
769 | tm->tm_year = 71; | |
770 | mf_set_rtc(tm); | |
771 | } | |
772 | { | |
1da177e4 LT |
773 | u8 year = ce_msg[5]; |
774 | u8 sec = ce_msg[6]; | |
775 | u8 min = ce_msg[7]; | |
776 | u8 hour = ce_msg[8]; | |
777 | u8 day = ce_msg[10]; | |
778 | u8 mon = ce_msg[11]; | |
779 | ||
780 | BCD_TO_BIN(sec); | |
781 | BCD_TO_BIN(min); | |
782 | BCD_TO_BIN(hour); | |
783 | BCD_TO_BIN(day); | |
784 | BCD_TO_BIN(mon); | |
785 | BCD_TO_BIN(year); | |
786 | ||
787 | if (year <= 69) | |
788 | year += 100; | |
789 | ||
790 | tm->tm_sec = sec; | |
791 | tm->tm_min = min; | |
792 | tm->tm_hour = hour; | |
793 | tm->tm_mday = day; | |
794 | tm->tm_mon = mon; | |
795 | tm->tm_year = year; | |
796 | } | |
797 | ||
798 | return 0; | |
799 | } | |
d0e8e291 | 800 | |
00611c5c | 801 | static int mf_get_rtc(struct rtc_time *tm) |
d0e8e291 SR |
802 | { |
803 | struct ce_msg_comp_data ce_complete; | |
804 | struct rtc_time_data rtc_data; | |
805 | int rc; | |
806 | ||
807 | memset(&ce_complete, 0, sizeof(ce_complete)); | |
808 | memset(&rtc_data, 0, sizeof(rtc_data)); | |
809 | init_completion(&rtc_data.com); | |
810 | ce_complete.handler = &get_rtc_time_complete; | |
811 | ce_complete.token = &rtc_data; | |
812 | rc = signal_ce_msg_simple(0x40, &ce_complete); | |
813 | if (rc) | |
814 | return rc; | |
815 | wait_for_completion(&rtc_data.com); | |
816 | return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm); | |
817 | } | |
818 | ||
819 | struct boot_rtc_time_data { | |
820 | int busy; | |
821 | struct ce_msg_data ce_msg; | |
822 | int rc; | |
823 | }; | |
824 | ||
825 | static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg) | |
826 | { | |
827 | struct boot_rtc_time_data *rtc = token; | |
828 | ||
829 | memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg)); | |
830 | rtc->rc = 0; | |
831 | rtc->busy = 0; | |
832 | } | |
833 | ||
00611c5c | 834 | static int mf_get_boot_rtc(struct rtc_time *tm) |
d0e8e291 SR |
835 | { |
836 | struct ce_msg_comp_data ce_complete; | |
837 | struct boot_rtc_time_data rtc_data; | |
838 | int rc; | |
839 | ||
840 | memset(&ce_complete, 0, sizeof(ce_complete)); | |
841 | memset(&rtc_data, 0, sizeof(rtc_data)); | |
842 | rtc_data.busy = 1; | |
843 | ce_complete.handler = &get_boot_rtc_time_complete; | |
844 | ce_complete.token = &rtc_data; | |
845 | rc = signal_ce_msg_simple(0x40, &ce_complete); | |
846 | if (rc) | |
847 | return rc; | |
848 | /* We need to poll here as we are not yet taking interrupts */ | |
849 | while (rtc_data.busy) { | |
937b31b1 | 850 | if (hvlpevent_is_pending()) |
35a84c2f | 851 | process_hvlpevents(); |
d0e8e291 SR |
852 | } |
853 | return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm); | |
854 | } | |
1da177e4 | 855 | |
1da177e4 LT |
856 | #ifdef CONFIG_PROC_FS |
857 | ||
858 | static int proc_mf_dump_cmdline(char *page, char **start, off_t off, | |
859 | int count, int *eof, void *data) | |
860 | { | |
861 | int len; | |
862 | char *p; | |
863 | struct vsp_cmd_data vsp_cmd; | |
864 | int rc; | |
865 | dma_addr_t dma_addr; | |
866 | ||
867 | /* The HV appears to return no more than 256 bytes of command line */ | |
868 | if (off >= 256) | |
869 | return 0; | |
870 | if ((off + count) > 256) | |
871 | count = 256 - off; | |
872 | ||
873 | dma_addr = dma_map_single(iSeries_vio_dev, page, off + count, | |
874 | DMA_FROM_DEVICE); | |
875 | if (dma_mapping_error(dma_addr)) | |
876 | return -ENOMEM; | |
877 | memset(page, 0, off + count); | |
878 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | |
879 | vsp_cmd.cmd = 33; | |
880 | vsp_cmd.sub_data.kern.token = dma_addr; | |
881 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | |
882 | vsp_cmd.sub_data.kern.side = (u64)data; | |
883 | vsp_cmd.sub_data.kern.length = off + count; | |
884 | mb(); | |
885 | rc = signal_vsp_instruction(&vsp_cmd); | |
886 | dma_unmap_single(iSeries_vio_dev, dma_addr, off + count, | |
887 | DMA_FROM_DEVICE); | |
888 | if (rc) | |
889 | return rc; | |
890 | if (vsp_cmd.result_code != 0) | |
891 | return -ENOMEM; | |
892 | p = page; | |
893 | len = 0; | |
894 | while (len < (off + count)) { | |
895 | if ((*p == '\0') || (*p == '\n')) { | |
896 | if (*p == '\0') | |
897 | *p = '\n'; | |
898 | p++; | |
899 | len++; | |
900 | *eof = 1; | |
901 | break; | |
902 | } | |
903 | p++; | |
904 | len++; | |
905 | } | |
906 | ||
907 | if (len < off) { | |
908 | *eof = 1; | |
909 | len = 0; | |
910 | } | |
911 | return len; | |
912 | } | |
913 | ||
914 | #if 0 | |
915 | static int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side) | |
916 | { | |
917 | struct vsp_cmd_data vsp_cmd; | |
918 | int rc; | |
919 | int len = *size; | |
920 | dma_addr_t dma_addr; | |
921 | ||
922 | dma_addr = dma_map_single(iSeries_vio_dev, buffer, len, | |
923 | DMA_FROM_DEVICE); | |
924 | memset(buffer, 0, len); | |
925 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | |
926 | vsp_cmd.cmd = 32; | |
927 | vsp_cmd.sub_data.kern.token = dma_addr; | |
928 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | |
929 | vsp_cmd.sub_data.kern.side = side; | |
930 | vsp_cmd.sub_data.kern.offset = offset; | |
931 | vsp_cmd.sub_data.kern.length = len; | |
932 | mb(); | |
933 | rc = signal_vsp_instruction(&vsp_cmd); | |
934 | if (rc == 0) { | |
935 | if (vsp_cmd.result_code == 0) | |
936 | *size = vsp_cmd.sub_data.length_out; | |
937 | else | |
938 | rc = -ENOMEM; | |
939 | } | |
940 | ||
941 | dma_unmap_single(iSeries_vio_dev, dma_addr, len, DMA_FROM_DEVICE); | |
942 | ||
943 | return rc; | |
944 | } | |
945 | ||
946 | static int proc_mf_dump_vmlinux(char *page, char **start, off_t off, | |
947 | int count, int *eof, void *data) | |
948 | { | |
949 | int sizeToGet = count; | |
950 | ||
951 | if (!capable(CAP_SYS_ADMIN)) | |
952 | return -EACCES; | |
953 | ||
954 | if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0) { | |
955 | if (sizeToGet != 0) { | |
956 | *start = page + off; | |
957 | return sizeToGet; | |
958 | } | |
959 | *eof = 1; | |
960 | return 0; | |
961 | } | |
962 | *eof = 1; | |
963 | return 0; | |
964 | } | |
965 | #endif | |
966 | ||
967 | static int proc_mf_dump_side(char *page, char **start, off_t off, | |
968 | int count, int *eof, void *data) | |
969 | { | |
970 | int len; | |
971 | char mf_current_side = ' '; | |
972 | struct vsp_cmd_data vsp_cmd; | |
973 | ||
974 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | |
975 | vsp_cmd.cmd = 2; | |
976 | vsp_cmd.sub_data.ipl_type = 0; | |
977 | mb(); | |
978 | ||
979 | if (signal_vsp_instruction(&vsp_cmd) == 0) { | |
980 | if (vsp_cmd.result_code == 0) { | |
981 | switch (vsp_cmd.sub_data.ipl_type) { | |
982 | case 0: mf_current_side = 'A'; | |
983 | break; | |
984 | case 1: mf_current_side = 'B'; | |
985 | break; | |
986 | case 2: mf_current_side = 'C'; | |
987 | break; | |
988 | default: mf_current_side = 'D'; | |
989 | break; | |
990 | } | |
991 | } | |
992 | } | |
993 | ||
994 | len = sprintf(page, "%c\n", mf_current_side); | |
995 | ||
996 | if (len <= (off + count)) | |
997 | *eof = 1; | |
998 | *start = page + off; | |
999 | len -= off; | |
1000 | if (len > count) | |
1001 | len = count; | |
1002 | if (len < 0) | |
1003 | len = 0; | |
1004 | return len; | |
1005 | } | |
1006 | ||
1007 | static int proc_mf_change_side(struct file *file, const char __user *buffer, | |
1008 | unsigned long count, void *data) | |
1009 | { | |
1010 | char side; | |
1011 | u64 newSide; | |
1012 | struct vsp_cmd_data vsp_cmd; | |
1013 | ||
1014 | if (!capable(CAP_SYS_ADMIN)) | |
1015 | return -EACCES; | |
1016 | ||
1017 | if (count == 0) | |
1018 | return 0; | |
1019 | ||
1020 | if (get_user(side, buffer)) | |
1021 | return -EFAULT; | |
1022 | ||
1023 | switch (side) { | |
1024 | case 'A': newSide = 0; | |
1025 | break; | |
1026 | case 'B': newSide = 1; | |
1027 | break; | |
1028 | case 'C': newSide = 2; | |
1029 | break; | |
1030 | case 'D': newSide = 3; | |
1031 | break; | |
1032 | default: | |
1033 | printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n"); | |
1034 | return -EINVAL; | |
1035 | } | |
1036 | ||
1037 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | |
1038 | vsp_cmd.sub_data.ipl_type = newSide; | |
1039 | vsp_cmd.cmd = 10; | |
1040 | ||
1041 | (void)signal_vsp_instruction(&vsp_cmd); | |
1042 | ||
1043 | return count; | |
1044 | } | |
1045 | ||
1046 | #if 0 | |
1047 | static void mf_getSrcHistory(char *buffer, int size) | |
1048 | { | |
1049 | struct IplTypeReturnStuff return_stuff; | |
1050 | struct pending_event *ev = new_pending_event(); | |
1051 | int rc = 0; | |
1052 | char *pages[4]; | |
1053 | ||
1054 | pages[0] = kmalloc(4096, GFP_ATOMIC); | |
1055 | pages[1] = kmalloc(4096, GFP_ATOMIC); | |
1056 | pages[2] = kmalloc(4096, GFP_ATOMIC); | |
1057 | pages[3] = kmalloc(4096, GFP_ATOMIC); | |
1058 | if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL) | |
1059 | || (pages[2] == NULL) || (pages[3] == NULL)) | |
1060 | return; |
1061 | ||
1062 | return_stuff.xType = 0; | |
1063 | return_stuff.xRc = 0; | |
1064 | return_stuff.xDone = 0; | |
1065 | ev->event.hp_lp_event.xSubtype = 6; | |
1066 | ev->event.hp_lp_event.x.xSubtypeData = | |
1067 | subtype_data('M', 'F', 'V', 'I'); | |
1068 | ev->event.data.vsp_cmd.xEvent = &return_stuff; | |
1069 | ev->event.data.vsp_cmd.cmd = 4; | |
1070 | ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex(); | |
1071 | ev->event.data.vsp_cmd.result_code = 0xFF; | |
1072 | ev->event.data.vsp_cmd.reserved = 0; | |
426c1a11 SR |
1073 | ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]); |
1074 | ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]); | |
1075 | ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]); | |
1076 | ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]); | |
1da177e4 LT |
1077 | mb(); |
1078 | if (signal_event(ev) != 0) | |
1079 | return; | |
1080 | ||
1081 | while (return_stuff.xDone != 1) | |
1082 | udelay(10); | |
1083 | if (return_stuff.xRc == 0) | |
1084 | memcpy(buffer, pages[0], size); | |
1085 | kfree(pages[0]); | |
1086 | kfree(pages[1]); | |
1087 | kfree(pages[2]); | |
1088 | kfree(pages[3]); | |
1089 | } | |
1090 | #endif | |
1091 | ||
1092 | static int proc_mf_dump_src(char *page, char **start, off_t off, | |
1093 | int count, int *eof, void *data) | |
1094 | { | |
1095 | #if 0 | |
1096 | int len; | |
1097 | ||
1098 | mf_getSrcHistory(page, count); | |
1099 | len = count; | |
1100 | len -= off; | |
1101 | if (len < count) { | |
1102 | *eof = 1; | |
1103 | if (len <= 0) | |
1104 | return 0; | |
1105 | } else | |
1106 | len = count; | |
1107 | *start = page + off; | |
1108 | return len; | |
1109 | #else | |
1110 | return 0; | |
1111 | #endif | |
1112 | } | |
1113 | ||
1114 | static int proc_mf_change_src(struct file *file, const char __user *buffer, | |
1115 | unsigned long count, void *data) | |
1116 | { | |
1117 | char stkbuf[10]; | |
1118 | ||
1119 | if (!capable(CAP_SYS_ADMIN)) | |
1120 | return -EACCES; | |
1121 | ||
1122 | if ((count < 4) && (count != 1)) { | |
1123 | printk(KERN_ERR "mf_proc: invalid src\n"); | |
1124 | return -EINVAL; | |
1125 | } | |
1126 | ||
1127 | if (count > (sizeof(stkbuf) - 1)) | |
1128 | count = sizeof(stkbuf) - 1; | |
1129 | if (copy_from_user(stkbuf, buffer, count)) | |
1130 | return -EFAULT; | |
1131 | ||
1132 | if ((count == 1) && (*stkbuf == '\0')) | |
1133 | mf_clear_src(); | |
1134 | else | |
1135 | mf_display_src(*(u32 *)stkbuf); | |
1136 | ||
1137 | return count; | |
1138 | } | |
1139 | ||
1140 | static int proc_mf_change_cmdline(struct file *file, const char __user *buffer, | |
1141 | unsigned long count, void *data) | |
1142 | { | |
1143 | struct vsp_cmd_data vsp_cmd; | |
1144 | dma_addr_t dma_addr; | |
1145 | char *page; | |
1146 | int ret = -EACCES; | |
1147 | ||
1148 | if (!capable(CAP_SYS_ADMIN)) | |
1149 | goto out; | |
1150 | ||
1151 | dma_addr = 0; | |
1152 | page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr, | |
1153 | GFP_ATOMIC); | |
1154 | ret = -ENOMEM; | |
1155 | if (page == NULL) | |
1156 | goto out; | |
1157 | ||
1158 | ret = -EFAULT; | |
1159 | if (copy_from_user(page, buffer, count)) | |
1160 | goto out_free; | |
1161 | ||
1162 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | |
1163 | vsp_cmd.cmd = 31; | |
1164 | vsp_cmd.sub_data.kern.token = dma_addr; | |
1165 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | |
1166 | vsp_cmd.sub_data.kern.side = (u64)data; | |
1167 | vsp_cmd.sub_data.kern.length = count; | |
1168 | mb(); | |
1169 | (void)signal_vsp_instruction(&vsp_cmd); | |
1170 | ret = count; | |
1171 | ||
1172 | out_free: | |
1173 | dma_free_coherent(iSeries_vio_dev, count, page, dma_addr); | |
1174 | out: | |
1175 | return ret; | |
1176 | } | |
1177 | ||
1178 | static ssize_t proc_mf_change_vmlinux(struct file *file, | |
1179 | const char __user *buf, | |
1180 | size_t count, loff_t *ppos) | |
1181 | { | |
b4d1ab58 | 1182 | struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); |
1da177e4 LT |
1183 | ssize_t rc; |
1184 | dma_addr_t dma_addr; | |
1185 | char *page; | |
1186 | struct vsp_cmd_data vsp_cmd; | |
1187 | ||
1188 | rc = -EACCES; | |
1189 | if (!capable(CAP_SYS_ADMIN)) | |
1190 | goto out; | |
1191 | ||
1192 | dma_addr = 0; | |
1193 | page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr, | |
1194 | GFP_ATOMIC); | |
1195 | rc = -ENOMEM; | |
1196 | if (page == NULL) { | |
1197 | printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n"); | |
1198 | goto out; | |
1199 | } | |
1200 | rc = -EFAULT; | |
1201 | if (copy_from_user(page, buf, count)) | |
1202 | goto out_free; | |
1203 | ||
1204 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | |
1205 | vsp_cmd.cmd = 30; | |
1206 | vsp_cmd.sub_data.kern.token = dma_addr; | |
1207 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | |
1208 | vsp_cmd.sub_data.kern.side = (u64)dp->data; | |
1209 | vsp_cmd.sub_data.kern.offset = *ppos; | |
1210 | vsp_cmd.sub_data.kern.length = count; | |
1211 | mb(); | |
1212 | rc = signal_vsp_instruction(&vsp_cmd); | |
1213 | if (rc) | |
1214 | goto out_free; | |
1215 | rc = -ENOMEM; | |
1216 | if (vsp_cmd.result_code != 0) | |
1217 | goto out_free; | |
1218 | ||
1219 | *ppos += count; | |
1220 | rc = count; | |
1221 | out_free: | |
1222 | dma_free_coherent(iSeries_vio_dev, count, page, dma_addr); | |
1223 | out: | |
1224 | return rc; | |
1225 | } | |
1226 | ||
5dfe4c96 | 1227 | static const struct file_operations proc_vmlinux_operations = { |
1da177e4 LT |
1228 | .write = proc_mf_change_vmlinux, |
1229 | }; | |
1230 | ||
1231 | static int __init mf_proc_init(void) | |
1232 | { | |
1233 | struct proc_dir_entry *mf_proc_root; | |
1234 | struct proc_dir_entry *ent; | |
1235 | struct proc_dir_entry *mf; | |
1236 | char name[2]; | |
1237 | int i; | |
1238 | ||
d9523aa1 SR |
1239 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
1240 | return 0; | |
1241 | ||
1da177e4 LT |
1242 | mf_proc_root = proc_mkdir("iSeries/mf", NULL); |
1243 | if (!mf_proc_root) | |
1244 | return 1; | |
1245 | ||
1246 | name[1] = '\0'; | |
1247 | for (i = 0; i < 4; i++) { | |
1248 | name[0] = 'A' + i; | |
1249 | mf = proc_mkdir(name, mf_proc_root); | |
1250 | if (!mf) | |
1251 | return 1; | |
1252 | ||
1253 | ent = create_proc_entry("cmdline", S_IFREG|S_IRUSR|S_IWUSR, mf); | |
1254 | if (!ent) | |
1255 | return 1; | |
1da177e4 LT |
1256 | ent->data = (void *)(long)i; |
1257 | ent->read_proc = proc_mf_dump_cmdline; | |
1258 | ent->write_proc = proc_mf_change_cmdline; | |
1259 | ||
1260 | if (i == 3) /* no vmlinux entry for 'D' */ | |
1261 | continue; | |
1262 | ||
1263 | ent = create_proc_entry("vmlinux", S_IFREG|S_IWUSR, mf); | |
1264 | if (!ent) | |
1265 | return 1; | |
1da177e4 LT |
1266 | ent->data = (void *)(long)i; |
1267 | ent->proc_fops = &proc_vmlinux_operations; | |
1268 | } | |
1269 | ||
1270 | ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root); | |
1271 | if (!ent) | |
1272 | return 1; | |
1da177e4 LT |
1273 | ent->data = (void *)0; |
1274 | ent->read_proc = proc_mf_dump_side; | |
1275 | ent->write_proc = proc_mf_change_side; | |
1276 | ||
1277 | ent = create_proc_entry("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root); | |
1278 | if (!ent) | |
1279 | return 1; | |
1da177e4 LT |
1280 | ent->data = (void *)0; |
1281 | ent->read_proc = proc_mf_dump_src; | |
1282 | ent->write_proc = proc_mf_change_src; | |
1283 | ||
1284 | return 0; | |
1285 | } | |
1286 | ||
1287 | __initcall(mf_proc_init); | |
1288 | ||
1289 | #endif /* CONFIG_PROC_FS */ | |
c8b84976 SR |
1290 | |
1291 | /* | |
1292 | * Get the RTC from the virtual service processor | |
1293 | * This requires flowing LpEvents to the primary partition | |
1294 | */ | |
1295 | void iSeries_get_rtc_time(struct rtc_time *rtc_tm) | |
1296 | { | |
c8b84976 SR |
1297 | mf_get_rtc(rtc_tm); |
1298 | rtc_tm->tm_mon--; | |
1299 | } | |
1300 | ||
1301 | /* | |
1302 | * Set the RTC in the virtual service processor | |
1303 | * This requires flowing LpEvents to the primary partition | |
1304 | */ | |
1305 | int iSeries_set_rtc_time(struct rtc_time *tm) | |
1306 | { | |
1307 | mf_set_rtc(tm); | |
1308 | return 0; | |
1309 | } | |
1310 | ||
143a1dec | 1311 | unsigned long iSeries_get_boot_time(void) |
c8b84976 | 1312 | { |
143a1dec PM |
1313 | struct rtc_time tm; |
1314 | ||
143a1dec PM |
1315 | mf_get_boot_rtc(&tm); |
1316 | return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday, | |
1317 | tm.tm_hour, tm.tm_min, tm.tm_sec); | |
c8b84976 | 1318 | } |
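Note the month handling above: mf_get_boot_rtc() fills tm_mon with the 1-based month from the VSP, which is passed straight to mktime(), while iSeries_get_rtc_time() decrements it to match struct rtc_time's 0-based convention. For reference, a stand-alone sketch of the classic Gregorian-to-Unix-seconds conversion in the style of the kernel's mktime() (an illustration under that assumption, not the tree's own code):

```c
#include <stdio.h>

/* Month is 1-12, year is the full year (e.g. 2005). */
static unsigned long to_epoch(unsigned int year, unsigned int mon,
			      unsigned int day, unsigned int hour,
			      unsigned int min, unsigned int sec)
{
	/* Shift so the leap day falls at the end of the shifted "year" */
	if (mon <= 2) {
		mon += 10;
		year -= 1;
	} else {
		mon -= 2;
	}

	return ((((unsigned long)(year / 4 - year / 100 + year / 400 +
				  367 * mon / 12 + day) +
		  year * 365 - 719499) * 24 + hour) * 60 + min) * 60 + sec;
}

int main(void)
{
	/* 1 Jan 2005 00:00:00 UTC -> 1104537600 */
	printf("%lu\n", to_epoch(2005, 1, 1, 0, 0, 0));
	return 0;
}
```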