Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
author	Linus Torvalds <[email protected]>
	Wed, 16 Mar 2016 17:47:45 +0000 (10:47 -0700)
committer	Linus Torvalds <[email protected]>
	Wed, 16 Mar 2016 17:47:45 +0000 (10:47 -0700)
Pull s390 updates from Martin Schwidefsky:

 - Add the CPU id for the new z13s machine

 - Add an s390-specific XOR template for RAID-5 checksumming based on
   the XC instruction, and remove all other alternatives; XC is always
   faster (a rough sketch of the idea follows this list)

 - The merge of our four different stack tracers into a single one

 - Tidy up the page table code; several large inline functions are now
   out-of-line.  Bloat-o-meter reports a ~11K text size reduction

 - A binary interface for the privileged CLP instruction to retrieve
   the hardware view of the installed PCI functions

 - Improvements for the dasd format code

 - Bug fixes and cleanups
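
For context, the RAID-5 code benchmarks several registered struct
xor_block_template implementations and picks the fastest.  Below is a
rough, hedged sketch of what an XC-based two-source routine could look
like -- an illustration of the idea only, not the code actually merged
in arch/s390/lib/xor.c.  It assumes "bytes" is a multiple of 256, which
holds for the page-sized buffers the RAID code passes in:

    #include <linux/raid/xor.h>

    /* XOR 256-byte chunks of p2 into p1 with the XC storage-to-storage
     * instruction.  The assembler encodes the length operand as
     * length-1, so the literal 256 below covers a full chunk.
     */
    static void xor_xc_2_sketch(unsigned long bytes, unsigned long *p1,
                                unsigned long *p2)
    {
            while (bytes >= 256) {
                    asm volatile(
                            "	xc	0(256,%[p1]),0(%[p2])\n"
                            : "+m" (*(char (*)[256]) p1)
                            : [p1] "a" (p1), [p2] "a" (p2),
                              "m" (*(char (*)[256]) p2)
                            : "cc");
                    p1 += 256 / sizeof(*p1);
                    p2 += 256 / sizeof(*p2);
                    bytes -= 256;
            }
    }

    static struct xor_block_template xor_block_xc_sketch = {
            .name = "xc-sketch",
            .do_2 = xor_xc_2_sketch,
            /* .do_3/.do_4/.do_5 would chain one XC per extra source */
    };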

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (31 commits)
  s390/pci: enforce fmb page boundary rule
  s390: fix floating pointer register corruption (again)
  s390/cpumf: add missing lpp magic initialization
  s390: Fix misspellings in comments
  s390/mm: split arch/s390/mm/pgtable.c
  s390/mm: uninline pmdp_xxx functions from pgtable.h
  s390/mm: uninline ptep_xxx functions from pgtable.h
  s390/pci: add ioctl interface for CLP
  s390: Use pr_warn instead of pr_warning
  s390/dasd: remove casts to dasd_*_private
  s390/dasd: Refactor dasd format functions
  s390/dasd: Simplify code in format logic
  s390/dasd: Improve dasd format code
  s390/percpu: remove this_cpu_cmpxchg_double_4
  s390/cpumf: Improve guest detection heuristics
  s390/fault: merge report_user_fault implementations
  s390/dis: use correct escape sequence for '%' character
  s390/kvm: simplify set_guest_storage_key
  s390/oprofile: add z13/z13s model numbers
  s390: add z13s model number to z13 elf platform
  ...

arch/s390/include/asm/pgalloc.h
arch/s390/kernel/setup.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
drivers/s390/block/dasd_diag.c

diff --combined arch/s390/include/asm/pgalloc.h
index d7cc79fb6191117a0ee0646ae6bbdf3a92a58132,92487193706ccd036f03f86632b05ef7e1e91f19..9b3d9b6099f2a8dd76a14f874dad8cc2715c2267
@@@ -23,10 -23,6 +23,6 @@@ void page_table_free(struct mm_struct *
  void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
  extern int page_table_allocate_pgste;
  
- int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
-                         unsigned long key, bool nq);
- unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
  static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
  {
        typedef struct { char _[n]; } addrtype;
@@@ -100,26 -96,12 +96,26 @@@ static inline void pud_populate(struct 
  
  static inline pgd_t *pgd_alloc(struct mm_struct *mm)
  {
 -      spin_lock_init(&mm->context.list_lock);
 -      INIT_LIST_HEAD(&mm->context.pgtable_list);
 -      INIT_LIST_HEAD(&mm->context.gmap_list);
 -      return (pgd_t *) crst_table_alloc(mm);
 +      unsigned long *table = crst_table_alloc(mm);
 +
 +      if (!table)
 +              return NULL;
 +      if (mm->context.asce_limit == (1UL << 31)) {
 +              /* Forking a compat process with 2 page table levels */
 +              if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
 +                      crst_table_free(mm, table);
 +                      return NULL;
 +              }
 +      }
 +      return (pgd_t *) table;
 +}
 +
 +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 +{
 +      if (mm->context.asce_limit == (1UL << 31))
 +              pgtable_pmd_page_dtor(virt_to_page(pgd));
 +      crst_table_free(mm, (unsigned long *) pgd);
  }
 -#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
  
  static inline void pmd_populate(struct mm_struct *mm,
                                pmd_t *pmd, pgtable_t pte)
diff --combined arch/s390/kernel/setup.c
index cedb0198675f7577c7237bb61c006acaf0e984d4,cc46767e902e262b461b0ab76402fef94a04404f..d3f9688f26b5e7acead2bb9a420eb0e0597d27f6
@@@ -327,6 -327,7 +327,7 @@@ static void __init setup_lowcore(void
                + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
        lc->current_task = (unsigned long) init_thread_union.thread_info.task;
        lc->thread_info = (unsigned long) &init_thread_union;
+       lc->lpp = LPP_MAGIC;
        lc->machine_flags = S390_lowcore.machine_flags;
        lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
        memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
  
  static struct resource code_resource = {
        .name  = "Kernel code",
 -      .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
 +      .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  };
  
  static struct resource data_resource = {
        .name = "Kernel data",
 -      .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
 +      .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  };
  
  static struct resource bss_resource = {
        .name = "Kernel bss",
 -      .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
 +      .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  };
  
  static struct resource __initdata *standard_resources[] = {
@@@ -408,7 -409,7 +409,7 @@@ static void __init setup_resources(void
  
        for_each_memblock(memory, reg) {
                res = alloc_bootmem_low(sizeof(*res));
 -              res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
 +              res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
  
                res->name = "System RAM";
                res->start = reg->base;
@@@ -779,6 -780,7 +780,7 @@@ static int __init setup_hwcaps(void
                strcpy(elf_platform, "zEC12");
                break;
        case 0x2964:
+       case 0x2965:
                strcpy(elf_platform, "z13");
                break;
        }
diff --combined arch/s390/kvm/interrupt.c
index 704809d91dddf759d9d7e4b33b86e4c3dc29441e,e5e8739dcde3d64dcbebca95e7e2d29cd2752a69..84efc2ba6a90555de35f6c3b78a47ab455b3beb6
@@@ -23,6 -23,7 +23,7 @@@
  #include <asm/uaccess.h>
  #include <asm/sclp.h>
  #include <asm/isc.h>
+ #include <asm/gmap.h>
  #include "kvm-s390.h"
  #include "gaccess.h"
  #include "trace-s390.h"
@@@ -182,9 -183,8 +183,9 @@@ static int cpu_timer_interrupts_enabled
  
  static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
  {
 -      return (vcpu->arch.sie_block->cputm >> 63) &&
 -             cpu_timer_interrupts_enabled(vcpu);
 +      if (!cpu_timer_interrupts_enabled(vcpu))
 +              return 0;
 +      return kvm_s390_get_cpu_timer(vcpu) >> 63;
  }
  
  static inline int is_ioirq(unsigned long irq_type)
@@@ -336,6 -336,23 +337,6 @@@ static void set_intercept_indicators(st
        set_intercept_indicators_stop(vcpu);
  }
  
 -static u16 get_ilc(struct kvm_vcpu *vcpu)
 -{
 -      switch (vcpu->arch.sie_block->icptcode) {
 -      case ICPT_INST:
 -      case ICPT_INSTPROGI:
 -      case ICPT_OPEREXC:
 -      case ICPT_PARTEXEC:
 -      case ICPT_IOINST:
 -              /* last instruction only stored for these icptcodes */
 -              return insn_length(vcpu->arch.sie_block->ipa >> 8);
 -      case ICPT_PROGI:
 -              return vcpu->arch.sie_block->pgmilc;
 -      default:
 -              return 0;
 -      }
 -}
 -
  static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
  {
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@@ -572,7 -589,7 +573,7 @@@ static int __must_check __deliver_prog(
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
 -      u16 ilc = get_ilc(vcpu);
 +      u16 ilen;
  
        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);
  
 -      VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
 -                 pgm_info.code, ilc);
 +      ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
 +      VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
 +                 pgm_info.code, ilen);
        vcpu->stat.deliver_program_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);
                                   (u8 *) __LC_PER_ACCESS_ID);
        }
  
 -      if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
 -              kvm_s390_rewind_psw(vcpu, ilc);
 +      if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
 +              kvm_s390_rewind_psw(vcpu, ilen);
  
 -      rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
 +      /* bit 1+2 of the target are the ilc, so we can directly use ilen */
 +      rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
                                 (u64 *) __LC_LAST_BREAK);
        rc |= put_guest_lc(vcpu, pgm_info.code,
@@@ -909,35 -924,9 +910,35 @@@ int kvm_cpu_has_pending_timer(struct kv
        return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
  }
  
 +static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 +{
 +      u64 now, cputm, sltime = 0;
 +
 +      if (ckc_interrupts_enabled(vcpu)) {
 +              now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 +              sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 +              /* already expired or overflow? */
 +              if (!sltime || vcpu->arch.sie_block->ckc <= now)
 +                      return 0;
 +              if (cpu_timer_interrupts_enabled(vcpu)) {
 +                      cputm = kvm_s390_get_cpu_timer(vcpu);
 +                      /* already expired? */
 +                      if (cputm >> 63)
 +                              return 0;
 +                      return min(sltime, tod_to_ns(cputm));
 +              }
 +      } else if (cpu_timer_interrupts_enabled(vcpu)) {
 +              sltime = kvm_s390_get_cpu_timer(vcpu);
 +              /* already expired? */
 +              if (sltime >> 63)
 +                      return 0;
 +      }
 +      return sltime;
 +}
 +
  int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
  {
 -      u64 now, sltime;
 +      u64 sltime;
  
        vcpu->stat.exit_wait_state++;
  
                return -EOPNOTSUPP; /* disabled wait */
        }
  
 -      if (!ckc_interrupts_enabled(vcpu)) {
 +      if (!ckc_interrupts_enabled(vcpu) &&
 +          !cpu_timer_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }
  
 -      now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 -      sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 -
 -      /* underflow */
 -      if (vcpu->arch.sie_block->ckc < now)
 +      sltime = __calculate_sltime(vcpu);
 +      if (!sltime)
                return 0;
  
        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
 -      VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
 +      VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
  no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
  
  void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
  {
 -      if (waitqueue_active(&vcpu->wq)) {
 +      if (swait_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
 -              wake_up_interruptible(&vcpu->wq);
 +              swake_up(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
  }
  enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
  {
        struct kvm_vcpu *vcpu;
 -      u64 now, sltime;
 +      u64 sltime;
  
        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
 -      now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
 -      sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 +      sltime = __calculate_sltime(vcpu);
  
        /*
         * If the monotonic clock runs faster than the tod clock we might be
         * woken up too early and have to go back to sleep to avoid deadlocks.
         */
 -      if (vcpu->arch.sie_block->ckc > now &&
 -          hrtimer_forward_now(timer, ns_to_ktime(sltime)))
 +      if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
                return HRTIMER_RESTART;
        kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
@@@ -1067,16 -1060,8 +1068,16 @@@ static int __inject_prog(struct kvm_vcp
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                   irq->u.pgm.code, 0);
  
 +      if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
 +              /* auto detection if no valid ILC was given */
 +              irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
 +              irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
 +              irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
 +      }
 +
        if (irq->u.pgm.code == PGM_PER) {
                li->irq.pgm.code |= PGM_PER;
 +              li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify PER related information */
                li->irq.pgm.per_address = irq->u.pgm.per_address;
                li->irq.pgm.per_code = irq->u.pgm.per_code;
        } else if (!(irq->u.pgm.code & PGM_PER)) {
                li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
                                   irq->u.pgm.code;
 +              li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify non-PER information */
                li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
                li->irq.pgm.mon_code = irq->u.pgm.mon_code;
diff --combined arch/s390/kvm/kvm-s390.c
index e196582fe87d4631ab9335b57c7d21f3f4a2ba0f,be1f0288443edae18396a79eb2353ce9c99854d9..668c087513e597158a16b20d79beaf7d0cd694d0
@@@ -30,6 -30,7 +30,7 @@@
  #include <asm/lowcore.h>
  #include <asm/etr.h>
  #include <asm/pgtable.h>
+ #include <asm/gmap.h>
  #include <asm/nmi.h>
  #include <asm/switch_to.h>
  #include <asm/isc.h>
@@@ -158,8 -159,6 +159,8 @@@ static int kvm_clock_sync(struct notifi
                kvm->arch.epoch -= *delta;
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        vcpu->arch.sie_block->epoch -= *delta;
 +                      if (vcpu->arch.cputm_enabled)
 +                              vcpu->arch.cputm_start += *delta;
                }
        }
        return NOTIFY_OK;
@@@ -276,17 -275,16 +277,17 @@@ static void kvm_s390_sync_dirty_log(str
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;
  
 -      down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);
  
-               if (gmap_test_and_clear_dirty(address, gmap))
+               if (test_and_clear_guest_dirty(gmap->mm, address))
                        mark_page_dirty(kvm, cur_gfn);
 +              if (fatal_signal_pending(current))
 +                      return;
 +              cond_resched();
        }
 -      up_read(&gmap->mm->mmap_sem);
  }
  
  /* Section: vm related */
@@@ -355,8 -353,8 +356,8 @@@ static int kvm_vm_ioctl_enable_cap(stru
                if (atomic_read(&kvm->online_vcpus)) {
                        r = -EBUSY;
                } else if (MACHINE_HAS_VX) {
 -                      set_kvm_facility(kvm->arch.model.fac->mask, 129);
 -                      set_kvm_facility(kvm->arch.model.fac->list, 129);
 +                      set_kvm_facility(kvm->arch.model.fac_mask, 129);
 +                      set_kvm_facility(kvm->arch.model.fac_list, 129);
                        r = 0;
                } else
                        r = -EINVAL;
                if (atomic_read(&kvm->online_vcpus)) {
                        r = -EBUSY;
                } else if (test_facility(64)) {
 -                      set_kvm_facility(kvm->arch.model.fac->mask, 64);
 -                      set_kvm_facility(kvm->arch.model.fac->list, 64);
 +                      set_kvm_facility(kvm->arch.model.fac_mask, 64);
 +                      set_kvm_facility(kvm->arch.model.fac_list, 64);
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
@@@ -654,7 -652,7 +655,7 @@@ static int kvm_s390_set_processor(struc
                memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
                       sizeof(struct cpuid));
                kvm->arch.model.ibc = proc->ibc;
 -              memcpy(kvm->arch.model.fac->list, proc->fac_list,
 +              memcpy(kvm->arch.model.fac_list, proc->fac_list,
                       S390_ARCH_FAC_LIST_SIZE_BYTE);
        } else
                ret = -EFAULT;
@@@ -688,8 -686,7 +689,8 @@@ static int kvm_s390_get_processor(struc
        }
        memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
        proc->ibc = kvm->arch.model.ibc;
 -      memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
 +      memcpy(&proc->fac_list, kvm->arch.model.fac_list,
 +             S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
                ret = -EFAULT;
        kfree(proc);
@@@ -709,7 -706,7 +710,7 @@@ static int kvm_s390_get_machine(struct 
        }
        get_cpu_id((struct cpuid *) &mach->cpuid);
        mach->ibc = sclp.ibc;
 -      memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
 +      memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
@@@ -1086,12 -1083,16 +1087,12 @@@ static void kvm_s390_get_cpu_id(struct 
        cpu_id->version = 0xff;
  }
  
 -static int kvm_s390_crypto_init(struct kvm *kvm)
 +static void kvm_s390_crypto_init(struct kvm *kvm)
  {
        if (!test_kvm_facility(kvm, 76))
 -              return 0;
 -
 -      kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
 -                                       GFP_KERNEL | GFP_DMA);
 -      if (!kvm->arch.crypto.crycb)
 -              return -ENOMEM;
 +              return;
  
 +      kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
        kvm_s390_set_crycb_format(kvm);
  
        /* Enable AES/DEA protected key functions by default */
                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
        get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
 -
 -      return 0;
  }
  
  static void sca_dispose(struct kvm *kvm)
@@@ -1154,30 -1157,37 +1155,30 @@@ int kvm_arch_init_vm(struct kvm *kvm, u
        if (!kvm->arch.dbf)
                goto out_err;
  
 -      /*
 -       * The architectural maximum amount of facilities is 16 kbit. To store
 -       * this amount, 2 kbyte of memory is required. Thus we need a full
 -       * page to hold the guest facility list (arch.model.fac->list) and the
 -       * facility mask (arch.model.fac->mask). Its address size has to be
 -       * 31 bits and word aligned.
 -       */
 -      kvm->arch.model.fac =
 -              (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 -      if (!kvm->arch.model.fac)
 +      kvm->arch.sie_page2 =
 +           (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 +      if (!kvm->arch.sie_page2)
                goto out_err;
  
        /* Populate the facility mask initially. */
 -      memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
 +      memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
 -                      kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
 +                      kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
                else
 -                      kvm->arch.model.fac->mask[i] = 0UL;
 +                      kvm->arch.model.fac_mask[i] = 0UL;
        }
  
        /* Populate the facility list initially. */
 -      memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
 +      kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
 +      memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
  
        kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
        kvm->arch.model.ibc = sclp.ibc & 0x0fff;
  
 -      if (kvm_s390_crypto_init(kvm) < 0)
 -              goto out_err;
 +      kvm_s390_crypto_init(kvm);
  
        spin_lock_init(&kvm->arch.float_int.lock);
        for (i = 0; i < FIRQ_LIST_COUNT; i++)
  
        return 0;
  out_err:
 -      kfree(kvm->arch.crypto.crycb);
 -      free_page((unsigned long)kvm->arch.model.fac);
 +      free_page((unsigned long)kvm->arch.sie_page2);
        debug_unregister(kvm->arch.dbf);
        sca_dispose(kvm);
        KVM_EVENT(3, "creation of vm failed: %d", rc);
@@@ -1259,9 -1270,10 +1260,9 @@@ static void kvm_free_vcpus(struct kvm *
  void kvm_arch_destroy_vm(struct kvm *kvm)
  {
        kvm_free_vcpus(kvm);
 -      free_page((unsigned long)kvm->arch.model.fac);
        sca_dispose(kvm);
        debug_unregister(kvm->arch.dbf);
 -      kfree(kvm->arch.crypto.crycb);
 +      free_page((unsigned long)kvm->arch.sie_page2);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
@@@ -1403,13 -1415,8 +1404,13 @@@ int kvm_arch_vcpu_init(struct kvm_vcpu 
                                    KVM_SYNC_PFAULT;
        if (test_kvm_facility(vcpu->kvm, 64))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
 -      if (test_kvm_facility(vcpu->kvm, 129))
 +      /* fprs can be synchronized via vrs, even if the guest has no vx. With
 +       * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
 +       */
 +      if (MACHINE_HAS_VX)
                vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
 +      else
 +              vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
  
        if (kvm_is_ucontrol(vcpu->kvm))
                return __kvm_ucontrol_vcpu_init(vcpu);
        return 0;
  }
  
 +/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
 +static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
 +{
 +      WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
 +      raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
 +      vcpu->arch.cputm_start = get_tod_clock_fast();
 +      raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
 +}
 +
 +/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
 +static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
 +{
 +      WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
 +      raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
 +      vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
 +      vcpu->arch.cputm_start = 0;
 +      raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
 +}
 +
 +/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
 +static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
 +{
 +      WARN_ON_ONCE(vcpu->arch.cputm_enabled);
 +      vcpu->arch.cputm_enabled = true;
 +      __start_cpu_timer_accounting(vcpu);
 +}
 +
 +/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
 +static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
 +{
 +      WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
 +      __stop_cpu_timer_accounting(vcpu);
 +      vcpu->arch.cputm_enabled = false;
 +}
 +
 +static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
 +{
 +      preempt_disable(); /* protect from TOD sync and vcpu_load/put */
 +      __enable_cpu_timer_accounting(vcpu);
 +      preempt_enable();
 +}
 +
 +static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
 +{
 +      preempt_disable(); /* protect from TOD sync and vcpu_load/put */
 +      __disable_cpu_timer_accounting(vcpu);
 +      preempt_enable();
 +}
 +
 +/* set the cpu timer - may only be called from the VCPU thread itself */
 +void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
 +{
 +      preempt_disable(); /* protect from TOD sync and vcpu_load/put */
 +      raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
 +      if (vcpu->arch.cputm_enabled)
 +              vcpu->arch.cputm_start = get_tod_clock_fast();
 +      vcpu->arch.sie_block->cputm = cputm;
 +      raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
 +      preempt_enable();
 +}
 +
 +/* update and get the cpu timer - can also be called from other VCPU threads */
 +__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
 +{
 +      unsigned int seq;
 +      __u64 value;
 +
 +      if (unlikely(!vcpu->arch.cputm_enabled))
 +              return vcpu->arch.sie_block->cputm;
 +
 +      preempt_disable(); /* protect from TOD sync and vcpu_load/put */
 +      do {
 +              seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
 +              /*
 +               * If the writer would ever execute a read in the critical
 +               * section, e.g. in irq context, we have a deadlock.
 +               */
 +              WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
 +              value = vcpu->arch.sie_block->cputm;
 +              /* if cputm_start is 0, accounting is being started/stopped */
 +              if (likely(vcpu->arch.cputm_start))
 +                      value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
 +      } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
 +      preempt_enable();
 +      return value;
 +}
 +
  void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  {
        /* Save host register state */
        vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
        vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
  
 -      /* Depending on MACHINE_HAS_VX, data stored to vrs either
 -       * has vector register or floating point register format.
 -       */
 -      current->thread.fpu.regs = vcpu->run->s.regs.vrs;
 +      if (MACHINE_HAS_VX)
 +              current->thread.fpu.regs = vcpu->run->s.regs.vrs;
 +      else
 +              current->thread.fpu.regs = vcpu->run->s.regs.fprs;
        current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
        if (test_fp_ctl(current->thread.fpu.fpc))
                /* User space provided an invalid FPC, let's clear it */
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 +      if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
 +              __start_cpu_timer_accounting(vcpu);
 +      vcpu->cpu = cpu;
  }
  
  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
  {
 +      vcpu->cpu = -1;
 +      if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
 +              __stop_cpu_timer_accounting(vcpu);
        atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
  
@@@ -1555,7 -1469,7 +1556,7 @@@ static void kvm_s390_vcpu_initial_reset
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
 -      vcpu->arch.sie_block->cputm     = 0UL;
 +      kvm_s390_set_cpu_timer(vcpu, 0);
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
@@@ -1625,8 -1539,7 +1626,8 @@@ static void kvm_s390_vcpu_setup_model(s
  
        vcpu->arch.cpu_id = model->cpu_id;
        vcpu->arch.sie_block->ibc = model->ibc;
 -      vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
 +      if (test_kvm_facility(vcpu->kvm, 7))
 +              vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
  }
  
  int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
@@@ -1704,7 -1617,6 +1705,7 @@@ struct kvm_vcpu *kvm_arch_vcpu_create(s
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
 +      seqcount_init(&vcpu->arch.cputm_seqcount);
  
        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
@@@ -1804,7 -1716,7 +1805,7 @@@ static int kvm_arch_vcpu_ioctl_get_one_
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
 -              r = put_user(vcpu->arch.sie_block->cputm,
 +              r = put_user(kvm_s390_get_cpu_timer(vcpu),
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
@@@ -1842,7 -1754,6 +1843,7 @@@ static int kvm_arch_vcpu_ioctl_set_one_
                                           struct kvm_one_reg *reg)
  {
        int r = -EINVAL;
 +      __u64 val;
  
        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
 -              r = get_user(vcpu->arch.sie_block->cputm,
 -                           (u64 __user *)reg->addr);
 +              r = get_user(val, (u64 __user *)reg->addr);
 +              if (!r)
 +                      kvm_s390_set_cpu_timer(vcpu, val);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
@@@ -2249,10 -2159,8 +2250,10 @@@ static int vcpu_pre_run(struct kvm_vcp
  
  static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
  {
 -      psw_t *psw = &vcpu->arch.sie_block->gpsw;
 -      u8 opcode;
 +      struct kvm_s390_pgm_info pgm_info = {
 +              .code = PGM_ADDRESSING,
 +      };
 +      u8 opcode, ilen;
        int rc;
  
        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
         * to look up the current opcode to get the length of the instruction
         * to be able to forward the PSW.
         */
 -      rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
 -      if (rc)
 -              return kvm_s390_inject_prog_cond(vcpu, rc);
 -      psw->addr = __rewind_psw(*psw, -insn_length(opcode));
 -
 -      return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 +      rc = read_guest_instr(vcpu, &opcode, 1);
 +      ilen = insn_length(opcode);
 +      if (rc < 0) {
 +              return rc;
 +      } else if (rc) {
 +              /* Instruction-Fetching Exceptions - we can't detect the ilen.
 +               * Forward by arbitrary ilc, injection will take care of
 +               * nullification if necessary.
 +               */
 +              pgm_info = vcpu->arch.pgm;
 +              ilen = 4;
 +      }
 +      pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
 +      kvm_s390_forward_psw(vcpu, ilen);
 +      return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
  }
  
  static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
@@@ -2346,12 -2245,10 +2347,12 @@@ static int __vcpu_run(struct kvm_vcpu *
                 */
                local_irq_disable();
                __kvm_guest_enter();
 +              __disable_cpu_timer_accounting(vcpu);
                local_irq_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                local_irq_disable();
 +              __enable_cpu_timer_accounting(vcpu);
                __kvm_guest_exit();
                local_irq_enable();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
@@@ -2375,7 -2272,7 +2376,7 @@@ static void sync_regs(struct kvm_vcpu *
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
 -              vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
 +              kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
                vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
                vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
                vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
@@@ -2397,7 -2294,7 +2398,7 @@@ static void store_regs(struct kvm_vcpu 
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
 -      kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
 +      kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
        kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
        kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
        kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
@@@ -2429,7 -2326,6 +2430,7 @@@ int kvm_arch_vcpu_ioctl_run(struct kvm_
        }
  
        sync_regs(vcpu, kvm_run);
 +      enable_cpu_timer_accounting(vcpu);
  
        might_fault();
        rc = __vcpu_run(vcpu);
                rc = 0;
        }
  
 +      disable_cpu_timer_accounting(vcpu);
        store_regs(vcpu, kvm_run);
  
        if (vcpu->sigset_active)
@@@ -2470,7 -2365,7 +2471,7 @@@ int kvm_s390_store_status_unloaded(stru
        unsigned char archmode = 1;
        freg_t fprs[NUM_FPRS];
        unsigned int px;
 -      u64 clkcomp;
 +      u64 clkcomp, cputm;
        int rc;
  
        px = kvm_s390_get_prefix(vcpu);
  
        /* manually convert vector registers if necessary */
        if (MACHINE_HAS_VX) {
 -              convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
 +              convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
                rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
                                     fprs, 128);
        } else {
                rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
 -                                   vcpu->run->s.regs.vrs, 128);
 +                                   vcpu->run->s.regs.fprs, 128);
        }
        rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
                              vcpu->run->s.regs.gprs, 128);
                              &vcpu->run->s.regs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
                              &vcpu->arch.sie_block->todpr, 4);
 +      cputm = kvm_s390_get_cpu_timer(vcpu);
        rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
 -                            &vcpu->arch.sie_block->cputm, 8);
 +                            &cputm, 8);
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
                              &clkcomp, 8);
@@@ -2712,8 -2606,7 +2713,8 @@@ static long kvm_s390_guest_mem_op(struc
        switch (mop->op) {
        case KVM_S390_MEMOP_LOGICAL_READ:
                if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
 -                      r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
 +                      r = check_gva_range(vcpu, mop->gaddr, mop->ar,
 +                                          mop->size, GACC_FETCH);
                        break;
                }
                r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
                break;
        case KVM_S390_MEMOP_LOGICAL_WRITE:
                if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
 -                      r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
 +                      r = check_gva_range(vcpu, mop->gaddr, mop->ar,
 +                                          mop->size, GACC_STORE);
                        break;
                }
                if (copy_from_user(tmpbuf, uaddr, mop->size)) {
diff --combined arch/s390/kvm/priv.c
index f218ccf016c87659e6be3b4bcc8de05e71ddf3ff,b632d8dda9adb791f6133fc2a88400b8ae92ea12..0a1591d3d25d07a6c2e98e406f77bb391ebb7c1a
@@@ -23,6 -23,7 +23,7 @@@
  #include <asm/sysinfo.h>
  #include <asm/pgtable.h>
  #include <asm/pgalloc.h>
+ #include <asm/gmap.h>
  #include <asm/io.h>
  #include <asm/ptrace.h>
  #include <asm/compat.h>
@@@ -173,7 -174,7 +174,7 @@@ static int handle_skey(struct kvm_vcpu 
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
  
 -      kvm_s390_rewind_psw(vcpu, 4);
 +      kvm_s390_retry_instr(vcpu);
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
  }
@@@ -184,7 -185,7 +185,7 @@@ static int handle_ipte_interlock(struc
        if (psw_bits(vcpu->arch.sie_block->gpsw).p)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
 -      kvm_s390_rewind_psw(vcpu, 4);
 +      kvm_s390_retry_instr(vcpu);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;
  }
@@@ -354,7 -355,7 +355,7 @@@ static int handle_stfl(struct kvm_vcpu 
         * We need to shift the lower 32 facility bits (bit 0-31) from a u64
         * into a u32 memory representation. They will remain bits 0-31.
         */
 -      fac = *vcpu->kvm->arch.model.fac->list >> 32;
 +      fac = *vcpu->kvm->arch.model.fac_list >> 32;
        rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
                            &fac, sizeof(fac));
        if (rc)
@@@ -759,8 -760,8 +760,8 @@@ static int handle_essa(struct kvm_vcpu 
        if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
  
 -      /* Rewind PSW to repeat the ESSA instruction */
 -      kvm_s390_rewind_psw(vcpu, 4);
 +      /* Retry the ESSA instruction */
 +      kvm_s390_retry_instr(vcpu);
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
        down_read(&gmap->mm->mmap_sem);
@@@ -981,12 -982,11 +982,12 @@@ static int handle_tprot(struct kvm_vcp
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_lock(vcpu);
 -      ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
 +      ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
        if (ret == PGM_PROTECTION) {
                /* Write protected? Try again with read-only... */
                cc = 1;
 -              ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
 +              ret = guest_translate_address(vcpu, address1, ar, &gpa,
 +                                            GACC_FETCH);
        }
        if (ret) {
                if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
diff --combined drivers/s390/block/dasd_diag.c
index 277b5c8c825ca4a63864a01ba67cbd8765b5a041,bb2d26a408341874025b2bcf5458b8384e145b71..5667146c6a0ae3c3c513a3180921ff5986cb79ea
@@@ -67,7 -67,7 +67,7 @@@ static const u8 DASD_DIAG_CMS1[] = { 0x
   * and function code cmd.
   * In case of an exception return 3. Otherwise return result of bitwise OR of
   * resulting condition code and DIAG return code. */
 -static inline int dia250(void *iob, int cmd)
 +static inline int __dia250(void *iob, int cmd)
  {
        register unsigned long reg2 asm ("2") = (unsigned long) iob;
        typedef union {
@@@ -77,6 -77,7 +77,6 @@@
        int rc;
  
        rc = 3;
 -      diag_stat_inc(DIAG_STAT_X250);
        asm volatile(
                "       diag    2,%2,0x250\n"
                "0:     ipm     %0\n"
        return rc;
  }
  
 +static inline int dia250(void *iob, int cmd)
 +{
 +      diag_stat_inc(DIAG_STAT_X250);
 +      return __dia250(iob, cmd);
 +}
 +
  /* Initialize block I/O to DIAG device using the specified blocksize and
   * block offset. On success, return zero and set end_block to contain the
   * number of blocks on the device minus the specified offset. Return non-zero
@@@ -104,12 -99,10 +104,10 @@@ static inline in
  mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
             blocknum_t offset, blocknum_t *end_block)
  {
-       struct dasd_diag_private *private;
-       struct dasd_diag_init_io *iib;
+       struct dasd_diag_private *private = device->private;
+       struct dasd_diag_init_io *iib = &private->iib;
        int rc;
  
-       private = (struct dasd_diag_private *) device->private;
-       iib = &private->iib;
        memset(iib, 0, sizeof (struct dasd_diag_init_io));
  
        iib->dev_nr = private->dev_id.devno;
  static inline int
  mdsk_term_io(struct dasd_device * device)
  {
-       struct dasd_diag_private *private;
-       struct dasd_diag_init_io *iib;
+       struct dasd_diag_private *private = device->private;
+       struct dasd_diag_init_io *iib = &private->iib;
        int rc;
  
-       private = (struct dasd_diag_private *) device->private;
-       iib = &private->iib;
        memset(iib, 0, sizeof (struct dasd_diag_init_io));
        iib->dev_nr = private->dev_id.devno;
        rc = dia250(iib, TERM_BIO);
@@@ -153,14 -144,13 +149,13 @@@ dasd_diag_erp(struct dasd_device *devic
        rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
        if (rc == 4) {
                if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
-                       pr_warning("%s: The access mode of a DIAG device "
-                                  "changed to read-only\n",
-                                  dev_name(&device->cdev->dev));
+                       pr_warn("%s: The access mode of a DIAG device changed to read-only\n",
+                               dev_name(&device->cdev->dev));
                rc = 0;
        }
        if (rc)
-               pr_warning("%s: DIAG ERP failed with "
-                           "rc=%d\n", dev_name(&device->cdev->dev), rc);
+               pr_warn("%s: DIAG ERP failed with rc=%d\n",
+                       dev_name(&device->cdev->dev), rc);
  }
  
  /* Start a given request at the device. Return zero on success, non-zero
@@@ -180,8 -170,8 +175,8 @@@ dasd_start_diag(struct dasd_ccw_req * c
                cqr->status = DASD_CQR_ERROR;
                return -EIO;
        }
-       private = (struct dasd_diag_private *) device->private;
-       dreq = (struct dasd_diag_req *) cqr->data;
+       private = device->private;
+       dreq = cqr->data;
  
        private->iob.dev_nr = private->dev_id.devno;
        private->iob.key = 0;
@@@ -320,18 -310,17 +315,17 @@@ static void dasd_ext_handler(struct ext
  static int
  dasd_diag_check_device(struct dasd_device *device)
  {
-       struct dasd_block *block;
-       struct dasd_diag_private *private;
+       struct dasd_diag_private *private = device->private;
        struct dasd_diag_characteristics *rdc_data;
-       struct dasd_diag_bio bio;
        struct vtoc_cms_label *label;
-       blocknum_t end_block;
+       struct dasd_block *block;
+       struct dasd_diag_bio bio;
        unsigned int sb, bsize;
+       blocknum_t end_block;
        int rc;
  
-       private = (struct dasd_diag_private *) device->private;
        if (private == NULL) {
-               private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
+               private = kzalloc(sizeof(*private), GFP_KERNEL);
                if (private == NULL) {
                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                                "Allocating memory for private DASD data "
                        return -ENOMEM;
                }
                ccw_device_get_id(device->cdev, &private->dev_id);
-               device->private = (void *) private;
+               device->private = private;
        }
        block = dasd_alloc_block();
        if (IS_ERR(block)) {
        block->base = device;
  
        /* Read Device Characteristics */
-       rdc_data = (void *) &(private->rdc_data);
+       rdc_data = &private->rdc_data;
        rdc_data->dev_nr = private->dev_id.devno;
        rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
  
                private->pt_block = 2;
                break;
        default:
-               pr_warning("%s: Device type %d is not supported "
-                          "in DIAG mode\n", dev_name(&device->cdev->dev),
-                          private->rdc_data.vdev_class);
+               pr_warn("%s: Device type %d is not supported in DIAG mode\n",
+                       dev_name(&device->cdev->dev),
+                       private->rdc_data.vdev_class);
                rc = -EOPNOTSUPP;
                goto out;
        }
                private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
                rc = dia250(&private->iob, RW_BIO);
                if (rc == 3) {
-                       pr_warning("%s: A 64-bit DIAG call failed\n",
-                                  dev_name(&device->cdev->dev));
+                       pr_warn("%s: A 64-bit DIAG call failed\n",
+                               dev_name(&device->cdev->dev));
                        rc = -EOPNOTSUPP;
                        goto out_label;
                }
                        break;
        }
        if (bsize > PAGE_SIZE) {
-               pr_warning("%s: Accessing the DASD failed because of an "
-                          "incorrect format (rc=%d)\n",
-                          dev_name(&device->cdev->dev), rc);
+               pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
+                       dev_name(&device->cdev->dev), rc);
                rc = -EIO;
                goto out_label;
        }
                block->s2b_shift++;
        rc = mdsk_init_io(device, block->bp_block, 0, NULL);
        if (rc && (rc != 4)) {
-               pr_warning("%s: DIAG initialization failed with rc=%d\n",
-                          dev_name(&device->cdev->dev), rc);
+               pr_warn("%s: DIAG initialization failed with rc=%d\n",
+                       dev_name(&device->cdev->dev), rc);
                rc = -EIO;
        } else {
                if (rc == 4)
@@@ -601,16 -589,14 +594,14 @@@ static in
  dasd_diag_fill_info(struct dasd_device * device,
                    struct dasd_information2_t * info)
  {
-       struct dasd_diag_private *private;
+       struct dasd_diag_private *private = device->private;
  
-       private = (struct dasd_diag_private *) device->private;
        info->label_block = (unsigned int) private->pt_block;
        info->FBA_layout = 1;
        info->format = DASD_FORMAT_LDL;
-       info->characteristics_size = sizeof (struct dasd_diag_characteristics);
-       memcpy(info->characteristics,
-              &((struct dasd_diag_private *) device->private)->rdc_data,
-              sizeof (struct dasd_diag_characteristics));
+       info->characteristics_size = sizeof(private->rdc_data);
+       memcpy(info->characteristics, &private->rdc_data,
+              sizeof(private->rdc_data));
        info->confdata_size = 0;
        return 0;
  }