#define CPU_STATE_LOAD 0x04
uint8_t cpu_state;
+ /* currently processed SIGP order */
+ uint8_t sigp_order;
+
} CPUS390XState;
#include "cpu-qom.h"
#include "ioinst.h"
+
#ifndef CONFIG_USER_ONLY
-void *s390_cpu_physical_memory_map(CPUS390XState *env, hwaddr addr, hwaddr *len,
- int is_write);
-void s390_cpu_physical_memory_unmap(CPUS390XState *env, void *addr, hwaddr len,
- int is_write);
+void do_restart_interrupt(CPUS390XState *env);
+
static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb)
{
hwaddr addr = 0;
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
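+/* Read from or write to guest memory; under KVM this is presumably backed by
+ * the KVM_S390_MEM_OP ioctl and returns a negative errno value on failure. */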
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
+ bool is_write);
+int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low);
+int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low);
#else
static inline void kvm_s390_virtio_irq(int config_change, uint64_t token)
{
static inline void kvm_s390_service_interrupt(uint32_t parm)
{
}
+static inline int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ return -ENOSYS;
+}
+static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ return -ENOSYS;
+}
+static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf,
+ int len, bool is_write)
+{
+ return -ENOSYS;
+}
static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code,
uint64_t te_code)
{
}
#endif
+
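+/* Accelerator-independent TOD clock accessors: use KVM when available and
+ * fall back to the (still stubbed) TCG path below. */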
+static inline int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_get_clock(tod_high, tod_low);
+ }
+ /* FIXME: implement for TCG */
+ *tod_high = 0;
+ *tod_low = 0;
+ return 0;
+}
+
+static inline int s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_set_clock(tod_high, tod_low);
+ }
+ /* FIXME: implement for TCG */
+ return 0;
+}
+
S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
unsigned int s390_cpu_halt(S390CPU *cpu);
void s390_cpu_unhalt(S390CPU *cpu);
unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu);
+static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
+{
+ return cpu->env.cpu_state;
+}
+
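+/* guest TOD clock migration handlers; presumably registered by the machine
+ * as a savevm section (the signatures match QEMUFile save/load handlers) */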
+void gtod_save(QEMUFile *f, void *opaque);
+int gtod_load(QEMUFile *f, void *opaque, int version_id);
/* service interrupts are floating, therefore we must not pass a cpustate */
void s390_sclp_extint(uint32_t parm);
void css_conditional_io_interrupt(SubchDev *sch);
int css_do_stsch(SubchDev *sch, SCHIB *schib);
bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid);
-int css_do_msch(SubchDev *sch, SCHIB *schib);
+int css_do_msch(SubchDev *sch, const SCHIB *schib);
int css_do_xsch(SubchDev *sch);
int css_do_csch(SubchDev *sch);
int css_do_hsch(SubchDev *sch);
int css_do_ssch(SubchDev *sch, ORB *orb);
-int css_do_tsch(SubchDev *sch, IRB *irb);
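+/* css_do_tsch is split so the IRB (and its length) can be fetched separately
+ * from updating the subchannel state, presumably once the IRB was delivered. */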
+int css_do_tsch_get_irb(SubchDev *sch, IRB *irb, int *irb_len);
+void css_do_tsch_update_subch(SubchDev *sch);
int css_do_stcrw(CRW *crw);
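+/* re-queue a CRW already consumed by css_do_stcrw, e.g. if it could not be
+ * delivered to the guest (assumption based on the name) */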
+void css_undo_stcrw(CRW *crw);
int css_do_tpi(IOIntCode *int_code, int lowcore);
int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
int rfmt, void *buf);
bool css_present(uint8_t cssid);
#endif
-#define cpu_init(model) (&cpu_s390x_init(model)->env)
+#define cpu_init(model) CPU(cpu_s390x_init(model))
#define cpu_exec cpu_s390x_exec
#define cpu_gen_code cpu_s390x_gen_code
#define cpu_signal_handler cpu_s390x_signal_handler
PSW mcck_old_psw; /* 0x160 */
PSW io_old_psw; /* 0x170 */
uint8_t pad7[0x1a0-0x180]; /* 0x180 */
- PSW restart_psw; /* 0x1a0 */
+ PSW restart_new_psw; /* 0x1a0 */
PSW external_new_psw; /* 0x1b0 */
PSW svc_new_psw; /* 0x1c0 */
PSW program_new_psw; /* 0x1d0 */
uint8_t name[8];
uint32_t caf;
uint8_t cpi[16];
- uint8_t res3[24];
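+ /* 3.2.2 extension (assumption): encoding of the extended name, the VM's
+  * UUID, and per-VM extended names stored in ext_names[] below */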
+ uint8_t res5[3];
+ uint8_t ext_name_encoding;
+ uint32_t res3;
+ uint8_t uuid[16];
} vm[8];
- uint8_t res4[3552];
+ uint8_t res4[1504];
+ uint8_t ext_names[8][256];
};
/* MMU defines */
#define SK_F (0x1 << 3)
#define SK_ACC_MASK (0xf << 4)
+/* SIGP order codes */
#define SIGP_SENSE 0x01
#define SIGP_EXTERNAL_CALL 0x02
#define SIGP_EMERGENCY 0x03
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH 0x12
-/* cpu status bits */
+/* SIGP condition codes */
+#define SIGP_CC_ORDER_CODE_ACCEPTED 0
+#define SIGP_CC_STATUS_STORED 1
+#define SIGP_CC_BUSY 2
+#define SIGP_CC_NOT_OPERATIONAL 3
+
+/* SIGP status bits */
#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_INVALID_ORDER 0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
+/* SIGP SET ARCHITECTURE modes */
+#define SIGP_MODE_ESA_S390 0
+#define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1
+#define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2
+
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
target_ulong *raddr, int *flags, bool exc);
void kvm_s390_clear_cmma_callback(void *opaque);
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
void kvm_s390_reset_vcpu(S390CPU *cpu);
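+/* restrict the guest memory size in KVM; *hw_limit is presumably set to the
+ * limit actually supported when new_limit exceeds it */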
+int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit);
#else
static inline void kvm_s390_io_interrupt(uint16_t subchannel_id,
uint16_t subchannel_nr,
static inline void kvm_s390_reset_vcpu(S390CPU *cpu)
{
}
+static inline int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit,
+ uint64_t *hw_limit)
+{
+ return 0;
+}
#endif
+
+static inline int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_set_mem_limit(kvm_state, new_limit, hw_limit);
+ }
+ return 0;
+}
+
static inline void cmma_reset(S390CPU *cpu)
{
if (kvm_enabled()) {